def test_that_equality_and_hash_ignore_order(self):
    """Test that catalogue equality and hash are independent of target order."""
    a = katpoint.Catalogue()
    b = katpoint.Catalogue()
    t1 = katpoint.Target('Nothing, special')
    t2 = katpoint.Target('Sun, special')
    a.add(t1)
    a.add(t2)
    b.add(t2)
    b.add(t1)
    self.assertEqual(a, b, 'Shuffled catalogues are not equal')
    self.assertEqual(hash(a), hash(b), 'Shuffled catalogues have different hashes')
def test_construct_catalogue(self):
    """Test construction of catalogues."""
    cat = katpoint.Catalogue(add_specials=True, add_stars=True, antenna=self.antenna)
    num_targets_original = len(cat)
    self.assertEqual(num_targets_original, len(katpoint.specials) + 1 + len(ephem.stars.stars),
                     'Number of targets incorrect')
    # Add target already in catalogue - no action
    cat.add(katpoint.Target('Sun, special'))
    num_targets = len(cat)
    self.assertEqual(num_targets, num_targets_original, 'Number of targets incorrect')
    cat2 = katpoint.Catalogue(add_specials=True, add_stars=True)
    cat2.add(katpoint.Target('Sun, special'))
    self.assertEqual(cat, cat2, 'Catalogues not equal')
    try:
        self.assertEqual(hash(cat), hash(cat2), 'Catalogue hashes not equal')
    except TypeError:
        self.fail('Catalogue object not hashable')
    # Add different targets with the same name
    cat2.add(katpoint.Target('Sun, special hot'))
    cat2.add(katpoint.Target('Sun | Sol, special'))
    self.assertEqual(len(cat2), num_targets_original + 2, 'Number of targets incorrect')
    cat2.remove('Sol')
    self.assertEqual(len(cat2), num_targets_original + 1, 'Number of targets incorrect')
    self.assertTrue(cat != cat2, 'Catalogues should not be equal')
    test_target = cat.targets[-1]
    self.assertEqual(test_target.description, cat[test_target.name].description,
                     'Lookup failed')
    self.assertEqual(cat['Non-existent'], None, 'Lookup of non-existent target failed')
    cat.add_tle(self.tle_lines, 'tle')
    cat.add_edb(self.edb_lines, 'edb')
    self.assertEqual(len(cat.targets), num_targets + 2, 'Number of targets incorrect')
    cat.remove(cat.targets[-1].name)
    self.assertEqual(len(cat.targets), num_targets + 1, 'Number of targets incorrect')
    closest_target, dist = cat.closest_to(test_target)
    self.assertEqual(closest_target.description, test_target.description,
                     'Closest target incorrect')
    self.assertAlmostEqual(dist, 0.0, places=5, msg='Target should be on top of itself')
def test_catalogue_same_name(self):
    """Test add() and remove() of targets with the same name."""
    cat = katpoint.Catalogue()
    targets = ['Sun, special', 'Sun | Sol, special', 'Sun, special hot']
    # Add various targets called Sun
    cat.add(targets[0])
    self.assertEqual(cat['Sun'].description, targets[0])
    cat.add(targets[0])
    self.assertEqual(len(cat), 1, 'Did not ignore duplicate target')
    cat.add(targets[1])
    self.assertEqual(cat['Sun'].description, targets[1])
    cat.add(targets[2])
    self.assertEqual(cat['Sun'].description, targets[2])
    # Check length, iteration, membership
    self.assertEqual(len(cat), len(targets))
    for n, t in enumerate(cat):
        self.assertEqual(t.description, targets[n])
    self.assertIn('Sun', cat)
    self.assertIn('Sol', cat)
    for t in targets:
        self.assertIn(katpoint.Target(t), cat)
    # Remove targets one by one
    cat.remove('Sun')
    self.assertEqual(cat['Sun'].description, targets[1])
    cat.remove('Sun')
    self.assertEqual(cat['Sun'].description, targets[0])
    cat.remove('Sun')
    self.assertTrue(len(cat) == len(cat.targets) == len(cat.lookup) == 0,
                    'Catalogue not empty')
def test_filter_catalogue(self):
    """Test filtering of catalogues."""
    cat = katpoint.Catalogue(add_specials=True, add_stars=True)
    cat = cat.filter(tags=['special', '~radec'])
    self.assertEqual(len(cat.targets), len(katpoint.specials), 'Number of targets incorrect')
    cat.add(self.flux_target)
    cat2 = cat.filter(flux_limit_Jy=50.0, flux_freq_MHz=1.5)
    self.assertEqual(len(cat2.targets), 1, 'Number of targets with sufficient flux should be 1')
    self.assertNotEqual(cat, cat2, 'Catalogues should not be equal')
    cat.add(katpoint.Target('Zenith, azel, 0, 90'))
    cat3 = cat.filter(az_limit_deg=[0, 180], timestamp=self.timestamp, antenna=self.antenna)
    self.assertEqual(len(cat3.targets), 2, 'Number of targets rising should be 2')
    cat4 = cat.filter(az_limit_deg=[180, 0], timestamp=self.timestamp, antenna=self.antenna)
    self.assertEqual(len(cat4.targets), 10, 'Number of targets setting should be 10')
    cat5 = cat.filter(el_limit_deg=85, timestamp=self.timestamp, antenna=self.antenna)
    self.assertEqual(len(cat5.targets), 1, 'Number of targets close to zenith should be 1')
    sun = katpoint.Target('Sun, special')
    cat6 = cat.filter(dist_limit_deg=[0.0, 1.0], proximity_targets=sun,
                      timestamp=self.timestamp, antenna=self.antenna)
    self.assertEqual(len(cat6.targets), 1, 'Number of targets close to Sun should be 1')
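# A minimal usage sketch of the filtering exercised in the test above, outside
# the test harness. The antenna description string is a hypothetical example;
# any valid katpoint.Antenna description should work.
import katpoint

ant = katpoint.Antenna('test, -30:43:17.3, 21:24:38.5, 1038.0, 12.0')
cat = katpoint.Catalogue(add_specials=True, antenna=ant)
now = katpoint.Timestamp()
# Keep only targets currently above 20 degrees elevation at this antenna
visible = cat.filter(el_limit_deg=20, timestamp=now, antenna=ant)
for target in visible:
    print(target.name)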
def test_construct_catalogue(self):
    """Test construction of catalogues."""
    cat = katpoint.Catalogue(add_specials=True, add_stars=True, antenna=self.antenna)
    cat.add(katpoint.Target('Sun, special'))
    num_targets = len(cat)
    self.assertEqual(num_targets, len(katpoint.specials) + 1 + 94, 'Number of targets incorrect')
    self.assertEqual(cat, cat, 'Catalogue not equal to itself')
    test_target = cat.targets[0]
    self.assertEqual(test_target.description, cat[test_target.name].description,
                     'Lookup failed')
    self.assertEqual(cat['Non-existent'], None, 'Lookup of non-existent target failed')
    cat.add_tle(self.tle_lines, 'tle')
    cat.add_edb(self.edb_lines, 'edb')
    self.assertEqual(len(cat.targets), num_targets + 2, 'Number of targets incorrect')
    cat.remove(cat.targets[-1].name)
    self.assertEqual(len(cat.targets), num_targets + 1, 'Number of targets incorrect')
    closest_target, dist = cat.closest_to(test_target)
    self.assertEqual(closest_target.description, test_target.description,
                     'Closest target incorrect')
def lst2utc(req_lst, ref_location, date=None):
    def get_lst_range(date):
        date_timestamp = time.mktime(date.timetuple())  # this will be local time
        time_range = katpoint.Timestamp(date_timestamp).secs + \
            numpy.arange(0, 24. * 3600., 60)
        lst_range = numpy.degrees(target.antenna.local_sidereal_time(time_range)) / 15.
        return time_range, lst_range

    req_lst = float(req_lst)
    cat = katpoint.Catalogue(add_specials=True)
    cat.antenna = katpoint.Antenna(ref_location)
    target = cat['Zenith']
    if date is None:  # find the best UTC for today
        date = datetime.date.today()
    else:
        date = date.replace(hour=0, minute=0, second=0, microsecond=0)
    [time_range, lst_range] = get_lst_range(date)
    lst_idx = numpy.abs(lst_range - req_lst).argmin()
    if lst_range[lst_idx] < req_lst:
        x = lst_range[lst_idx:lst_idx + 2]
        y = time_range[lst_idx:lst_idx + 2]
    else:
        x = lst_range[lst_idx - 1:lst_idx + 1]
        y = time_range[lst_idx - 1:lst_idx + 1]
    linefit = numpy.poly1d(numpy.polyfit(x, y, 1))
    return datetime.datetime.utcfromtimestamp(linefit(req_lst))
def _get_targets(filename):
    """Quick look function to get the list of targets in a data file.

    This is intended to be called without creating a full katdal object.

    Parameters
    ----------
    filename : string
        Data file name

    Returns
    -------
    targets : :class:`katpoint.Catalogue` object
        All targets in file

    """
    f, version = H5DataV2._open(filename)
    # Use the delay-tracking centre as the one and only target
    # Try two different sensors for the DBE target
    try:
        target_list = f['MetaData/Sensors/DBE/target']
    except Exception:
        # Since h5py errors have varied over the years, we need Exception
        target_list = f['MetaData/Sensors/Beams/Beam0/target']
    all_target_strings = [target_data[1] for target_data in target_list]
    return katpoint.Catalogue(np.unique(all_target_strings))
def test_catalogue_tab_completion(self):
    cat = katpoint.Catalogue()
    cat.add('Nothing, special')
    cat.add('Earth | Terra Incognita, azel, 0, 0')
    cat.add('Earth | Sky, azel, 0, 90')
    # Check that it returns a sorted list
    self.assertEqual(cat._ipython_key_completions_(),
                     ['Earth', 'Nothing', 'Sky', 'Terra Incognita'])
def test_catalogue_basic(self):
    """Basic catalogue tests."""
    cat = katpoint.Catalogue(add_specials=True)
    repr(cat)
    str(cat)
    cat.add('# Comments will be ignored')
    with self.assertRaises(ValueError):
        cat.add([1])
def test_file(self):
    orig = katpoint.Catalogue([_TRG_A, _TRG_B, _TRG_C])
    with tempfile.NamedTemporaryFile('w', suffix='.csv') as f:
        orig.save(f.name)
        test1 = open_sky_model(f.name)
        test2 = open_sky_model('file://' + f.name + '?format=katpoint')
        assert_equal(orig, test1._catalogue)
        assert_equal(orig, test2._catalogue)
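# Hedged sketch of a similar round trip using katpoint alone (no open_sky_model):
# Catalogue.save() writes target description strings to a file, and add()
# accepts an open file object, as in the collect_targets snippets elsewhere in
# this collection. The file name is illustrative.
import katpoint

cat = katpoint.Catalogue(['Zenith, azel, 0, 90', 'Sun, special'])
cat.save('targets.csv')
restored = katpoint.Catalogue()
with open('targets.csv') as f:
    restored.add(f)
print(len(restored), 'targets restored')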
def collect_targets(kat, args):
    """Collect targets into katpoint catalogue.

    Parameters
    ----------
    kat : session kat container-like object
    args : list of strings
        Argument list of target names, description strings and/or catalogue file names

    Returns
    -------
    catalogue : :class:`katpoint.Catalogue` object
        Catalogue containing all collected targets

    Raises
    ------
    ValueError
        If final catalogue is empty

    """
    from_names = from_strings = from_catalogues = num_catalogues = 0
    catalogue = katpoint.Catalogue()
    catalogue.antenna = katpoint.Antenna(_ref_location)
    setobserver(catalogue.antenna.observer)
    for arg in args:
        try:
            # First assume the string is a catalogue file name
            count_before_add = len(catalogue)
            try:
                catalogue.add(open(arg))
            except ValueError:
                msg = "Catalogue {} contains bad targets".format(arg)
                user_logger.warning(msg)
            from_catalogues += len(catalogue) - count_before_add
            num_catalogues += 1
        except IOError:
            # If the file failed to load,
            # assume it is a name or description string
            # With no comma in target string,
            # assume it's the name of a target
            # to be looked up in standard catalogue
            if arg.find(",") < 0:
                target = kat.sources[arg]
                if target is None:
                    msg = "Unknown target or catalogue {}, skipping it".format(arg)
                    user_logger.warning(msg)
                else:
                    catalogue.add(target)
                    from_names += 1
            else:
                # Assume the argument is a target description string
                try:
                    catalogue.add(arg)
                    from_strings += 1
                except ValueError as err:
                    msg = "Invalid target {}, skipping it [{}]".format(arg, err)
                    user_logger.warning(msg)
    if len(catalogue) == 0:
        raise ValueError("No known targets found in argument list")
    msg = ("Found {} target(s): {} from {} catalogue(s), {} from default catalogue and "
           "{} as target string(s)".format(len(catalogue), from_catalogues, num_catalogues,
                                           from_names, from_strings))
    user_logger.info(msg)
    return catalogue
def test_visibility_list(self):
    """Test output of visibility list."""
    cat = katpoint.Catalogue(add_specials=True, add_stars=True)
    cat.add(self.flux_target)
    cat.remove('Zenith')
    cat.visibility_list(timestamp=self.timestamp, antenna=self.antenna,
                        flux_freq_MHz=1.5, antenna2=self.antenna2)
    cat.antenna = self.antenna
    cat.flux_freq_MHz = 1.5
    cat.visibility_list(timestamp=self.timestamp)
def collect_targets(kat, args):
    """Collect targets specified by name, description string or catalogue file.

    Parameters
    ----------
    kat : :class:`utility.KATCoreConn` object
        KAT connection object associated with this experiment
    args : list of strings
        Argument list containing mixture of target names, description strings
        and / or catalogue file names

    Returns
    -------
    targets : :class:`katpoint.Catalogue` object
        Catalogue containing all targets found

    Raises
    ------
    ValueError
        If final catalogue is empty

    """
    from_names = from_strings = from_catalogues = num_catalogues = 0
    targets = katpoint.Catalogue(antenna=kat.sources.antenna)
    for arg in args:
        try:
            # First assume the string is a catalogue file name
            count_before_add = len(targets)
            try:
                targets.add(file(arg))
            except ValueError:
                user_logger.warning("Catalogue %r contains bad targets" % (arg,))
            from_catalogues += len(targets) - count_before_add
            num_catalogues += 1
        except IOError:
            # If the file failed to load, assume it is a name or description string
            # With no comma in target string, assume it's the name of a target
            # to be looked up in standard catalogue
            if arg.find(',') < 0:
                target = kat.sources[arg]
                if target is None:
                    user_logger.warning("Unknown target or catalogue %r, skipping it" % (arg,))
                else:
                    targets.add(target)
                    from_names += 1
            else:
                # Assume the argument is a target description string
                try:
                    targets.add(arg)
                    from_strings += 1
                except ValueError, err:
                    user_logger.warning("Invalid target %r, skipping it [%s]" % (arg, err))
def test_completer(self):
    """Test IPython tab completer."""
    # pylint: disable-msg=W0201,W0612,R0903
    cat = katpoint.Catalogue(add_stars=True)

    # Set up dummy object containing user namespace and line to be completed
    class Dummy(object):
        pass
    event = Dummy()
    event.shell = Dummy()
    event.shell.user_ns = locals()
    event.line = "t = cat['Rasal"
    names = katpoint._catalogue_completer(event, event)
    self.assertEqual(names, ['Rasalgethi', 'Rasalhague'], 'Tab completer failed')
def lst2utc(req_lst, ref_location, date=None):
    """Find the UTC time corresponding to a requested LST, for a given date or today.

    Parameters
    ----------
    req_lst : float
        Requested local sidereal time, in hours
    ref_location : `EarthLocation()`
        Location on earth where LST is being measured
    date : datetime
        Date when LST is being measured

    Returns
    -------
    datetime.datetime
        UTC date and time corresponding to the requested LST

    """
    def get_lst_range(date):
        date_timestamp = time.mktime(date.timetuple())  # this will be local time
        time_range = katpoint.Timestamp(date_timestamp).secs + numpy.arange(
            0, 24.0 * 3600.0, 60)
        lst_range = numpy.degrees(target.antenna.local_sidereal_time(time_range)) / 15.0
        return time_range, lst_range

    req_lst = float(req_lst)
    cat = katpoint.Catalogue(add_specials=True)
    cat.antenna = katpoint.Antenna(ref_location)
    target = cat["Zenith"]
    if date is None:  # find the best UTC for today
        date = datetime.date.today()
    else:
        date = date.replace(hour=0, minute=0, second=0, microsecond=0)
    [time_range, lst_range] = get_lst_range(date)
    lst_idx = numpy.abs(lst_range - req_lst).argmin()
    if lst_range[lst_idx] < req_lst:
        x = lst_range[lst_idx:lst_idx + 2]
        y = time_range[lst_idx:lst_idx + 2]
    else:
        x = lst_range[lst_idx - 1:lst_idx + 1]
        y = time_range[lst_idx - 1:lst_idx + 1]
    linefit = numpy.poly1d(numpy.polyfit(x, y, 1))
    return datetime.datetime.utcfromtimestamp(linefit(req_lst))
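# Hedged usage sketch for lst2utc (names and values are illustrative). The
# reference location is assumed to be a katpoint Antenna description string
# ("name, latitude, longitude, altitude, diameter"), since it is passed
# straight to katpoint.Antenna() inside lst2utc.
ref_location = 'ref, -30:42:39.8, 21:26:38.0, 1035.0, 12.0'
utc_time = lst2utc(4.2, ref_location)
print('LST 4.2 h corresponds to UTC', utc_time)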
def _get_targets(filename):
    """Quick look function to get the list of targets in a data file.

    This is intended to be called without creating a complete katdal object.

    Parameters
    ----------
    filename : string
        Data file name

    Returns
    -------
    targets : :class:`katpoint.Catalogue` object
        All targets in file

    """
    f, version = H5DataV3._open(filename)
    target_list = f['TelescopeModel/cbf/target']
    all_target_strings = [target_data[1] for target_data in target_list]
    return katpoint.Catalogue(np.unique(all_target_strings))
def _get_targets(filename):
    """Quick look function to get the list of targets in a data file.

    This is intended to be called without creating a full katdal object.

    Parameters
    ----------
    filename : string
        Data file name

    Returns
    -------
    targets : :class:`katpoint.Catalogue` object
        All targets in file

    """
    f, version = H5DataV1._open(filename)
    compound_scans = f['Scans']
    all_target_strings = [to_str(compound_scans[group].attrs['target'])
                          for group in compound_scans]
    return katpoint.Catalogue(np.unique(all_target_strings))
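# Hedged usage sketch of the quick-look helpers above (the file name is
# illustrative): list the targets in a data file without constructing a full
# katdal dataset.
targets = _get_targets('1234567890.h5')
for target in targets:
    print(target.name, target.tags)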
def test_sort_catalogue(self):
    """Test sorting of catalogues."""
    cat = katpoint.Catalogue(add_specials=True, add_stars=True)
    self.assertEqual(len(cat.targets), len(katpoint.specials) + 1 + len(ephem.stars.stars),
                     'Number of targets incorrect')
    cat1 = cat.sort(key='name')
    self.assertEqual(cat1, cat, 'Catalogue equality failed')
    # Ephem 3.7.7.0 added new stars
    self.assertIn(cat1.targets[0].name, {'Acamar', 'Achernar'}, 'Sorting on name failed')
    cat2 = cat.sort(key='ra', timestamp=self.timestamp, antenna=self.antenna)
    self.assertIn(cat2.targets[0].name, {'Alpheratz', 'Sirrah'}, 'Sorting on ra failed')
    cat3 = cat.sort(key='dec', timestamp=self.timestamp, antenna=self.antenna)
    self.assertIn(cat3.targets[0].name, {'Miaplacidus', 'Agena'}, 'Sorting on dec failed')
    cat4 = cat.sort(key='az', timestamp=self.timestamp, antenna=self.antenna, ascending=False)
    self.assertEqual(cat4.targets[0].name, 'Polaris', 'Sorting on az failed')  # az: 359:25:07.3
    cat5 = cat.sort(key='el', timestamp=self.timestamp, antenna=self.antenna)
    self.assertEqual(cat5.targets[-1].name, 'Zenith', 'Sorting on el failed')  # el: 90:00:00.0
    cat.add(self.flux_target)
    cat6 = cat.sort(key='flux', ascending=False, flux_freq_MHz=1.5)
    self.assertTrue('flux' in (cat6.targets[0].name, cat6.targets[-1].name),
                    'Flux target should be at start or end of catalogue after sorting')
    self.assertTrue((cat6.targets[0].flux_density(1.5) == 100.0) or
                    (cat6.targets[-1].flux_density(1.5) == 100.0),
                    'Sorting on flux failed')
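# Minimal sketch of the flux-based sorting used above (the target and its
# coefficients are illustrative). A target with a log-polynomial flux model
# reports its flux density in Jy at a given frequency in MHz, which
# Catalogue.sort(key='flux') uses as the sort key.
import katpoint

flux_target = katpoint.Target('flux, radec, 0.0, 0.0, (1.0 2.0 2.0 0.0 0.0)')
# log10(S) = 2 over the 1-2 MHz range of this model, i.e. 100 Jy at 1.5 MHz
print(flux_target.flux_density(1.5))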
def setup(self):
    # Deliberately use a strange subset of polarizations, to test that
    # the indexing is correct.
    pols = [polarization.STOKES_I, polarization.STOKES_Q, polarization.STOKES_V]
    self.image_parameters = parameters.ImageParameters(
        parameters.FixedImageParameters(
            polarizations=pols,
            dtype=np.float64
        ),
        q_fov=1.0,
        image_oversample=None,
        frequency=0.2 * units.m,
        array=None,
        pixel_size=0.00001,
        pixels=4096)
    oversample = 8
    w_planes = 100
    self.grid_parameters = parameters.GridParameters(
        parameters.FixedGridParameters(
            antialias_width=7.0,
            oversample=oversample,
            image_oversample=4,
            max_w=5 * units.m,
            kernel_width=7),
        w_slices=10,
        w_planes=w_planes)
    catalogue = katpoint.Catalogue([
        "dummy0, radec, 19:39:25.03, -63:42:45.7, (200.0 12000.0 -11.11 7.777 -1.231 0 0 0 1 0.1 0 0)",  # noqa: E501
        "dummy1, radec, 19:39:20.38, -63:42:09.1, (800.0 8400.0 -3.708 3.807 -0.7202 0 0 0 1 0.2 0.2 0.2)",  # noqa: E501
        "dummy2, radec, 19:39:08.29, -63:42:33.0, (800.0 43200.0 0.956 0.584 -0.1644 0 0 0 1 0.1 0 1)"  # noqa: E501
    ])
    self.model = sky_model.KatpointSkyModel(catalogue)
    self.phase_centre = katpoint.construct_radec_target(
        '19:39:30', '-63:42:30').astrometric_radec()
    self.phase_centre = self.phase_centre * units.rad
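# Hedged sketch of the phase-centre construction above: construct_radec_target
# builds a target from right ascension and declination strings, and
# astrometric_radec() returns its (ra, dec) coordinates in radians.
import katpoint

centre = katpoint.construct_radec_target('19:39:30', '-63:42:30')
ra, dec = centre.astrometric_radec()
print(float(ra), float(dec))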
def test_telstate(self):
    client = fakeredis.FakeRedis()
    telstate = katsdptelstate.TelescopeState(
        katsdptelstate.redis.RedisBackend(client))
    # Fake just enough of telstate to keep katdal happy. This isn't all in the right
    # namespace, but that doesn't really matter.
    telstate['stream_name'] = 'sdp_l0'
    telstate_l0 = telstate.view('sdp_l0')
    telstate_l0['stream_type'] = 'sdp.vis'
    telstate_l0['chunk_info'] = {
        'correlator_data': {
            'prefix': '1234567890-sdp-l0',
            'dtype': '<c8',
            'shape': (0, 0, 0)
        }
    }
    telstate_l0['sync_time'] = 1234567890.0
    telstate_l0['first_timestamp'] = 0.0
    telstate_l0['int_time'] = 1.0
    _put_models(telstate, '1234567890', 'continuum', [(_TRG_A, [_TRG_A, _TRG_C])])
    expected = katpoint.Catalogue([_TRG_A, _TRG_C])
    with mock.patch('redis.Redis', return_value=client) as mock_redis:
        test = open_sky_model(
            'redis://invalid:6379/?format=katdal&db=1&capture_block_id=1234567890'
            '&continuum=continuum'
            '&target=A,+radec,+20:00:00.00,+-60:00:00.0,+(200.0+12000.0+1.0+0.5+0.0)')
    mock_redis.assert_called_with(host='invalid', port=6379, db=1,
                                  socket_timeout=mock.ANY, health_check_interval=mock.ANY)
    assert_equal(expected, test._catalogue)
opts.dump_rate = 2.0
uniquename = 0
with verify_and_connect(opts) as kat:
    if len(args) > 0:
        # Load pointing calibrator catalogues and command line targets
        pointing_sources = collect_targets(kat, args)
    else:
        # Default catalogue contains the radec sources in the standard kat database
        pointing_sources = kat.sources.filter(tags='radec')
        user_logger.info("No valid targets specified, loaded default catalogue with %d targets" %
                         (len(pointing_sources),))

    # Remove sources in skip catalogue file, if provided
    if opts.skip_catalogue is not None and os.path.exists(opts.skip_catalogue):
        skip_sources = katpoint.Catalogue(file(opts.skip_catalogue))
        for target in skip_sources:
            pointing_sources.remove(target.name)
        user_logger.info("After skipping, %d targets are left" % (len(pointing_sources),))

    # Quit early if there are no sources to observe
    if len(pointing_sources) == 0:
        user_logger.warning("Empty point source catalogue or all targets are skipped")
    elif len(pointing_sources.filter(el_limit_deg=opts.horizon)) == 0:
        user_logger.warning("No targets are currently visible - please re-run the script later")
    else:
        # Observed targets will be written back to catalogue file, or into the void
def plot_target_selection(f):
    fig = plt.figure(figsize=(21, 15))
    # Find a target to plot
    f.select(scans='track')
    # Copy the catalogue safely so that we can remove targets in local copy
    check_targets = katpoint.Catalogue([f.catalogue.targets[t] for t in f.target_indices])
    # Any bpcals?
    if check_targets.filter(tags='bpcal'):
        check_targets = check_targets.filter(tags='bpcal')
    # Otherwise gaincal?
    elif check_targets.filter(tags='gaincal'):
        check_targets = check_targets.filter(tags='gaincal')
    # Else just check all targets
    max_integration = 0
    for target in check_targets.targets:
        f.select(targets=target)
        if f.vis.shape[0] > max_integration:
            select_target = target
            max_integration = f.vis.shape[0]
    plt.suptitle("Correlation Spectra on " + select_target.name, fontsize=16, fontweight="bold")
    try:
        for pol in ('h', 'v'):
            f.select(targets=select_target, corrprods='cross', pol=pol, scans='track')
            crosscorr = [(f.inputs.index(inpA), f.inputs.index(inpB))
                         for inpA, inpB in f.corr_products]
            # For plotting the power
            fig.subplots_adjust(wspace=0., hspace=0.)
            # debug_here()
            for n, (indexA, indexB) in enumerate(crosscorr):
                subplot_index = (len(f.ants) * indexA + indexB + 1) if pol == 'h' \
                    else (indexA + len(f.ants) * indexB + 1)
                ax = fig.add_subplot(len(f.ants), len(f.ants), subplot_index)
                # Loop through scans and average individually to remove changes in power over time
                sum_power = np.zeros(f.vis.shape[1])  # initialise sum
                for tmp in f.scans():
                    power = np.abs(f.vis[:, :, n])[:, :, 0]
                    # Get average power for this scan (omit first channel)
                    dc_offset = np.mean(power[:, 1:])
                    sum_power += np.sum(power[:, :] / dc_offset, axis=0)
                av_power = 10. * np.log10(sum_power / max_integration)
                ax.plot(f.channel_freqs, av_power)
                ax.set_xticks([])
                ax.set_yticks([])
                if pol == 'h':
                    if indexA == 0:
                        ax.xaxis.set_label_position('top')
                        ax.set_xlabel(f.inputs[indexB][3:], size='xx-large')
                    if indexB == len(f.ants) - 1:
                        ax.yaxis.set_label_position('right')
                        ax.set_ylabel(f.inputs[indexA][3:], rotation='horizontal', size='xx-large')
                else:
                    if indexA == 0:
                        ax.set_ylabel(f.inputs[indexB][3:], rotation='horizontal', size='xx-large')
                    if indexB == len(f.ants) - 1:
                        ax.set_xlabel(f.inputs[indexA][3:], size='xx-large')
        # plt.savefig(pp, format='pdf')
    except KeyError, error:
        print 'Failed to read scans from File: ', f, ' with Key Error:', error
if len(args) > 0:
    # Load catalogue files or targets if given
    baseline_sources = collect_targets(kat, args)
else:
    # Prune the standard catalogue to only contain sources that
    # are good for baseline calibration
    great_sources = ['3C123', 'Taurus A', 'Orion A', 'Hydra A', '3C273',
                     'Virgo A', 'Centaurus A', 'Pictor A']
    good_sources = ['3C48', '3C84', 'J0408-6545', 'J0522-3627', '3C161',
                    'J1819-6345', 'J1939-6342', '3C433', 'J2253+1608']
    baseline_sources = katpoint.Catalogue(
        [kat.sources[src] for src in great_sources + good_sources],
        antenna=kat.sources.antenna)
    user_logger.info("No targets specified, loaded default catalogue with %d targets",
                     len(baseline_sources))

with start_session(kat, **vars(opts)) as session:
    # Force delay tracking to be off
    opts.no_delays = True
    session.standard_setup(**vars(opts))
    session.capture_start()
    start_time = time.time()
    targets_observed = []
    # Keep going until the time is up
    keep_going = True
    while keep_going:
def MKContPipeline(files, outputdir, **kwargs): """MeerKAT Continuum pipeline. Parameters ---------- files : list h5 filenames (note: support for multiple h5 files i.e. ConcatenatedDataSet is not currently supported) outputdir : string Directory location to write output data, scratchdir : string, optional The directory location of the aips disk parmFile : string, optional Overwrite the default imaging parameters using this parameter file. """ if len(files) == 1: h5file = files[0] else: h5file = files ############### Initialize katfile object ######################### OK = False # Open the h5 file as a katfile object try: #open katfile and perform selection according to kwargs katdal_ref_ant = kwargs.get('katdal_refant', '') katdal_retries = kwargs.get('katdal_retries', 2) katdal_timeout = kwargs.get('katdal_timeout', 300) katdata = katfile.open(h5file, ref_ant=katdal_ref_ant, timeout=katdal_timeout, retries=katdal_retries) OK = True except Exception as exception: print(exception) if not OK: raise KATUnimageableError("Unable to read MVF data in " + str(h5file)) # If we are doing polcal- search for the most recent delaycal observation if kwargs.get('polcal'): if kwargs.get('delaycal_mvf') is None: # Automatically determine delay_cal CBID delay_katdata = KATGetDelayCal(h5file, katdata, timeout=katdal_timeout, retries=katdal_retries) else: # Use the user supplied one delay_katdata = KATGetDelayCal(kwargs.get('delaycal_mvf')) kwargs["delay_katdata"] = delay_katdata # Die gracefully if we cannot write to the output area... if not os.path.exists(outputdir): print('Specified output directory: ' + outputdir + 'does not exist.') exit(-1) # Obit error logging err = OErr.OErr() #################### Initialize filenames ####################################################### nameRoot = katdata.obs_params.get('capture_block_id', katdata.experiment_id) if type(nameRoot) == list: nameRoot = nameRoot[0] fileRoot = os.path.join(outputdir, nameRoot) # root of file name logFile = fileRoot + ".log" # Processing log file avgClass = ("UVAv")[0:6] # Averaged data AIPS class manifestfile = outputdir + '/manifest.pickle' ############################# Initialize OBIT and AIPS ########################################## noScrat = [] # Logging directly to logFile OErr.PInit(err, 2, logFile) EVLAAddOutFile(os.path.basename(logFile), 'project', 'Pipeline log file') if kwargs.get('reuse'): ObitSys = AIPSSetup.AIPSSetup(err, configfile=kwargs.get('configFile'), scratchdir=kwargs.get('scratchdir'), aipsdisk=kwargs.get('aipsdisk'), overwrite=False) else: ObitSys = AIPSSetup.AIPSSetup(err, configfile=kwargs.get('configFile'), scratchdir=kwargs.get('scratchdir'), aipsdisk=kwargs.get('aipsdisk')) # Get the set up AIPS environment. 
AIPS_ROOT = os.environ['AIPS_ROOT'] AIPS_VERSION = os.environ['AIPS_VERSION'] nThreads = 72 user = OSystem.PGetAIPSuser() AIPS.userno = user disk = 1 fitsdisk = 1 nam = nameRoot[:10] clss = "Raw" seq = 1 ############### Condition data ######################### #Get calibrator models fluxcals = katpoint.Catalogue( open(FITSDir.FITSdisks[1] + "/PERLEY_BUTLER_2013.csv")) #Condition data (get bpcals, update names for aips conventions etc) KATh5Condition(katdata, fluxcals, err) ############################# Initialise Parameters ########################################## ####### Initialize parameters dictionary ##### parms = KATInitContParms() parms['PolCal'] = kwargs.get('polcal') parms['XYtarg'] = kwargs.get('XYtarg') # Get default XYtarg if it is not set targs = [targ.name for targ in katdata.catalogue.targets] if parms['PolCal']: if parms['XYtarg'] is None: GOTTARG = False for targ in ['1934-638', '0408-65']: if targ in targs: parms['XYtarg'] = targ GOTTARG = True break if not GOTTARG: raise RuntimeError( 'No default targets (1934-638, 0408-65) for XYFix. Cannot run in PolCal mode.' ) else: if parms['XYtarg'] not in targs: raise RuntimeError( 'XYtarg target %s not in observation. Cannot run in PolCal mode.' % (parms['XYtarg'])) ####### User defined parameters ###### if kwargs.get('parmFile'): print("parmFile", kwargs.get('parmFile')) exec(open(kwargs.get('parmFile')).read()) EVLAAddOutFile(os.path.basename(kwargs.get('parmFile')), 'project', 'Pipeline input parameters') ###################### Data selection and static edits ############################################ # Select data based on static imageable parameters KATh5Select(katdata, parms, err, **kwargs) # General AIPS data parameters at script level dataClass = ("UVDa")[0:6] # AIPS class of raw uv data delayClass = "DELA" band = katdata.spectral_windows[0].product #Correlator product project = os.path.basename(os.path.splitext(files[0])[0])[ 0:10] # Project name (12 char or less, used as AIPS Name) outIClass = parms["outIClass"] # image AIPS class debug = parms["debug"] check = parms["check"] ####################### Import data into AIPS ##################################################### # Reuse or nay? sw = katdata.spectral_windows[katdata.spw] # Pick up static flags if sw.band == 'L': sflags = FetchObject(ObitTalkUtil.FITSDir.FITSdisks[fitsdisk] + 'maskred.pickle') if kwargs.get('flag', None): mess = 'Using static RFI mask in file %s for L-band' % ( ObitTalkUtil.FITSDir.FITSdisks[fitsdisk] + 'maskred.pickle', ) printMess(mess, logFile) elif sw.band == 'UHF': sflags = FetchObject(ObitTalkUtil.FITSDir.FITSdisks[fitsdisk] + 'maskredUHF.pickle') if kwargs.get('flag', None): mess = 'Using static RFI mask in file %s for UHF-band' % ( ObitTalkUtil.FITSDir.FITSdisks[fitsdisk] + 'maskredUHF.pickle', ) printMess(mess, logFile) else: sflags = np.zeros(sw.num_chans, dtype=np.bool) sflags = sflags[katdata.channels] # Construct a template uvfits file from master template mastertemplate = ObitTalkUtil.FITSDir.FITSdisks[ fitsdisk] + 'MKATTemplate.uvtab.gz' outtemplate = nam + '.uvtemp' if kwargs.get('reuse'): uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), dataClass, disk, seq, True, err) obsdata = KATH5toAIPS.GetKATMeta(katdata, err) # Extract AIPS parameters of the uv data to the metadata obsdata["Aproject"] = uv.Aname obsdata["Aclass"] = uv.Aclass obsdata["Aseq"] = uv.Aseq obsdata["Adisk"] = disk obsdata["calInt"] = katdata.dump_period obsdata["fitsdisk"] = fitsdisk # TODO: Check if the input data has been Hanned. 
doneHann = True else: mess = '\nLoading UV data with CBID: %s' % ( katdata.obs_params['capture_block_id'], ) printMess(mess, logFile) KATH5toAIPS.MakeTemplate(mastertemplate, outtemplate, katdata) uv = OTObit.uvlod(outtemplate, 0, EVLAAIPSName(project), clss, disk, seq, err) obsdata = KATH5toAIPS.KAT2AIPS(katdata, uv, disk, fitsdisk, err, calInt=katdata.dump_period, static=sflags, **kwargs) MakeIFs.UVMakeIF(uv, 8, err, solInt=katdata.dump_period) os.remove(outtemplate) if parms["PolCal"]: mess = '\nLoading delay calibration with CBID: %s' % ( delay_katdata.obs_params['capture_block_id'], ) printMess(mess, logFile) # Load the delay cal observation KATH5toAIPS.MakeTemplate(mastertemplate, outtemplate, katdata) delay_uv = OTObit.uvlod(outtemplate, 0, EVLAAIPSName(project), delayClass, disk, seq, err) KATH5toAIPS.KAT2AIPS(delay_katdata, delay_uv, disk, fitsdisk, err, calInt=katdata.dump_period, static=sflags, flag=False) MakeIFs.UVMakeIF(delay_uv, 8, err, solInt=katdata.dump_period) os.remove(outtemplate) # Print the uv data header to screen. uv.Header(err) ############################# Set Project Processing parameters ################################### # Parameters derived from obsdata and katdata KATGetObsParms(obsdata, katdata, parms, logFile) ###### Initialise target parameters ##### KATInitTargParms(katdata, parms, err) # Load the outputs pickle jar EVLAFetchOutFiles() OSystem.PAllowThreads(nThreads) # Allow threads in Obit/oython retCode = 0 doBand = -1 BPVer = 0 maxgap = max(parms["CalAvgTime"], 160. * katdata.dump_period) / 60. ################### Start processing ############################################################### mess = "Start project "+parms["project"]+" AIPS user no. "+str(AIPS.userno)+\ ", KAT7 configuration "+parms["KAT7Cfg"] printMess(mess, logFile) if debug: pydoc.ttypager = pydoc.plainpager # don't page task input displays mess = "Using Debug mode " printMess(mess, logFile) if check: mess = "Only checking script" printMess(mess, logFile) # Log parameters printMess("Parameter settings", logFile) for p in parms: mess = " " + p + ": " + str(parms[p]) printMess(mess, logFile) clist = [] for DCal in parms["DCals"]: if DCal["Source"] not in clist: clist.append(DCal["Source"]) for PCal in parms["PCals"]: if PCal["Source"] not in clist: clist.append(PCal["Source"]) for ACal in parms["ACals"]: if ACal["Source"] not in clist: clist.append(ACal["Source"]) if kwargs.get('targets') is not None: targets = [ targ.name for targ in katdata.catalogue if (targ.name not in clist) and ( targ.name in kwargs.get('targets').split(',')) ] else: targets = [ targ.name for targ in katdata.catalogue if (targ.name not in clist) ] refAnt = kwargs.get('refant') if refAnt is not None: try: SaveObject(obsdata['antLookup'][refAnt], fileRoot + ".refAnt.pickle", True) except: mess = "Select reference antenna " + refAnt + " not in antenna table." 
printMess(mess, logFile) print(mess) refAnt = FetchObject(fileRoot + ".refAnt.pickle") # Save parameters to pickle jar, manifest ParmsPicklefile = fileRoot + ".Parms.pickle" # Where results saved SaveObject(parms, ParmsPicklefile, True) EVLAAddOutFile(os.path.basename(ParmsPicklefile), 'project', 'Processing parameters used') loadClass = dataClass # Hanning - only if not reusing doneHann = False if not kwargs.get('reuse'): if parms["doHann"]: uv = KATHann(uv, EVLAAIPSName(project), dataClass, disk, seq, err, \ doDescm=parms["doDescm"], flagVer=-1, logfile=logFile, zapin=True, check=check, debug=debug) doneHann = True if parms["PolCal"] and parms["doHann"]: mess = "Hanning delay calibration scan" printMess(mess, logFile) delay_uv = KATHann(delay_uv, EVLAAIPSName(project), delayClass, disk, seq + 1, err, \ doDescm=parms["doDescm"], flagVer=-1, logfile=logFile, zapin=True, check=check, debug=debug) if doneHann: # Halve channels after hanning. parms["selChan"] = int(parms["selChan"] / 2) parms["BChDrop"] = int(parms["BChDrop"] / 2) parms["EChDrop"] = int(parms["EChDrop"] / 2) if uv == None and not check: raise RuntimeError("Cannot Hann data ") # Clear any old calibration/editing if parms["doClearTab"] or kwargs.get('reuse'): mess = "Clear previous calibration" printMess(mess, logFile) EVLAClearCal(uv, err, doGain=parms["doClearGain"], doFlag=parms["doClearFlag"], doBP=parms["doClearBP"], check=check) OErr.printErrMsg(err, "Error resetting calibration") # Copy FG 1 to FG 2 if parms["doCopyFG"]: mess = "Copy FG 1 to FG 2" printMess(mess, logFile) retCode = KATCopyFG(uv, err, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error Copying FG table") # Flag antennas shadowed by others? if parms["doShad"]: retCode = EVLAShadow (uv, err, shadBl=parms["shadBl"], \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error Shadow flagging data") # Median window time editing, for RFI impulsive in time if parms["doMednTD1"]: mess = "Median window time editing, for RFI impulsive in time:" printMess(mess, logFile) retCode = EVLAMedianFlag (uv, clist, err, noScrat=noScrat, nThreads=nThreads, \ avgTime=parms["mednAvgTime"], avgFreq=parms["mednAvgFreq"], chAvg= parms["mednChAvg"], \ timeWind=parms["mednTimeWind"],flagVer=2, flagTab=2,flagSig=parms["mednSigma"], \ logfile=logFile, check=check, debug=False) if retCode != 0: raise RuntimeError("Error in MednFlag") # Median window frequency editing, for RFI impulsive in frequency if parms["doFD1"]: mess = "Median window frequency editing, for RFI impulsive in frequency:" printMess(mess, logFile) retCode = EVLAAutoFlag (uv, clist, err, flagVer=2, flagTab=2, doCalib=-1, doBand=-1, \ timeAvg=parms["FD1TimeAvg"], \ doFD=True, FDmaxAmp=1.0e20, FDmaxV=1.0e20, FDwidMW=parms["FD1widMW"], \ FDmaxRMS=[1.0e20,0.1], FDmaxRes=parms["FD1maxRes"], \ FDmaxResBL= parms["FD1maxRes"], FDbaseSel=parms["FD1baseSel"],\ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in AutoFlag") # Parallactic angle correction? if parms["doPACor"]: retCode = EVLAPACor(uv, err, \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in Parallactic angle correction") # Need to find a reference antenna? See if we have saved it? 
if (parms["refAnt"] <= 0): refAnt = FetchObject(fileRoot + ".refAnt.pickle") if refAnt: parms["refAnt"] = refAnt # Use bandpass calibrator and center half of each spectrum if parms["refAnt"] <= 0: mess = "Find best reference antenna: run Calib on BP Cal(s) " printMess(mess, logFile) parms["refAnt"] = EVLAGetRefAnt(uv, parms["BPCals"], err, flagVer=0, \ solInt=parms["bpsolint1"], nThreads=nThreads, \ logfile=logFile, check=check, debug=debug) if err.isErr: raise RuntimeError("Error finding reference antenna") if parms["refAnts"][0] <= 0: parms["refAnts"][0] = parms["refAnt"] mess = "Picked reference antenna " + str(parms["refAnt"]) printMess(mess, logFile) # Save it ParmsPicklefile = fileRoot + ".Parms.pickle" # Where results saved SaveObject(parms, ParmsPicklefile, True) refAntPicklefile = fileRoot + ".refAnt.pickle" # Where results saved SaveObject(parms["refAnt"], refAntPicklefile, True) # Plot Raw, edited data? if parms["doRawSpecPlot"] and parms["plotSource"]: mess = "Raw Spectral plot for: " + ' '.join(parms["BPCal"]) printMess(mess, logFile) plotFile = fileRoot + "_RawSpec.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \ Stokes=["RR","LL"], doband=-1, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") EVLAAddOutFile(plotFile, 'project', 'Pipeline log file') if parms["PolCal"]: mess = "XYphase bandpass calibration" printMess(mess, logFile) retCode = KATXPhase(delay_uv, uv, err, logfile=logFile, check=check, debug=debug, doCalib=-1, flagVer=0, doBand=-1, refAnt=parms['refAnt']) doBand = 1 BPVer += 1 if retCode != 0: raise RuntimeError("Error in Xphase calibration") # delay calibration if parms["doDelayCal"] and parms["DCals"] and not check: plotFile = fileRoot + "_DelayCal.ps" retCode = EVLADelayCal(uv, parms["DCals"], err, \ BChan=parms["delayBChan"], EChan=parms["delayEChan"], \ doCalib=-1, flagVer=0, doBand=doBand, BPVer=BPVer, \ solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"], \ refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], doZeroPhs=parms["delayZeroPhs"], \ doAvgIF=parms["delayAvgIF"], doAvgPol=parms["delayAvgPol"], \ doPlot=parms["doSNPlot"], plotFile=plotFile, \ nThreads=nThreads, noScrat=noScrat, \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in delay calibration") # Plot corrected data? if parms["doSpecPlot"] and parms["plotSource"]: plotFile = fileRoot + "_DelaySpec.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \ plotFile, parms["refAnt"], err, \ Stokes=["RR","LL"], doband=doBand, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") # Bandpass calibration if parms["doBPCal"] and parms["BPCals"]: retCode = KATBPCal(uv, parms["BPCals"], err, doBand=doBand, BPVer=BPVer, newBPVer=0, noScrat=noScrat, solInt1=parms["bpsolint1"], \ solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \ BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \ BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \ doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \ UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in Bandpass calibration") # Plot corrected data? 
if parms["doSpecPlot"] and parms["plotSource"]: plotFile = fileRoot + "_BPSpec.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, \ parms["refAnt"], err, Stokes=["RR","LL"], doband=1, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") # Amp & phase Calibrate if parms["doAmpPhaseCal"]: plotFile = fileRoot + "_APCal.ps" retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], doCalib=2, doBand=1, BPVer=0, flagVer=0, \ BChan=parms["ampBChan"], EChan=parms["ampEChan"], \ solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \ doAmpEdit=parms["doAmpEdit"], ampSigma=parms["ampSigma"], \ ampEditFG=parms["ampEditFG"], avgPol=parms["PolCal"], \ doPlot=parms["doSNPlot"], plotFile=plotFile, refAnt=parms["refAnt"], \ nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error calibrating") # More editing if parms["doAutoFlag"]: mess = "Post calibration editing:" printMess(mess, logFile) # if going to redo then only calibrators if parms["doRecal"]: # Only calibrators clist = [] for DCal in parms["DCals"]: if DCal["Source"] not in clist: clist.append(DCal["Source"]) for PCal in parms["PCals"]: if PCal["Source"] not in clist: clist.append(PCal["Source"]) for ACal in parms["ACals"]: if ACal["Source"] not in clist: clist.append(ACal["Source"]) else: clist = [] retCode = EVLAAutoFlag (uv, clist, err, flagVer=0, flagTab =2, \ doCalib=2, gainUse=0, doBand=1, BPVer=BPVer, \ IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \ doFD=parms["doFirstAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \ FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \ FDmaxRes=parms["FDmaxRes"], FDmaxResBL=parms["FDmaxResBL"], \ FDbaseSel=parms["FDbaseSel"], \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in AutoFlag") # Redo the calibration using new flagging? if parms["doBPCal2"] == None: parms["doBPCal2"] = parms["doBPCal"] if parms["doDelayCal2"] == None: parms["doDelayCal2"] = parms["doDelayCal2"] if parms["doAmpPhaseCal2"] == None: parms["doAmpPhaseCal2"] = parms["doAmpPhaseCal"] if parms["doAutoFlag2"] == None: parms["doAutoFlagCal2"] = parms["doAutoFlag"] if parms["doRecal"]: mess = "Redo calibration:" printMess(mess, logFile) EVLAClearCal(uv, err, doGain=True, doFlag=False, doBP=True, check=check, logfile=logFile) OErr.printErrMsg(err, "Error resetting calibration") BPVer = 0 # Parallactic angle correction? 
if parms["doPACor"]: retCode = EVLAPACor(uv, err, \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in Parallactic angle correction") # Run MKXPhase on delaycal data and attach BP table to UV data if parms["PolCal"]: mess = "XYphase bandpass calibration" printMess(mess, logFile) retCode = KATXPhase(delay_uv, uv, err, logfile=logFile, check=check, debug=debug, doCalib=-1, flagVer=0, doBand=-1, refAnt=parms['refAnt']) BPVer += 1 if retCode != 0: raise RuntimeError("Error in Xphase calibration") # Delay recalibration if parms["doDelayCal2"] and parms["DCals"] and not check: plotFile = fileRoot + "_DelayCal2.ps" retCode = EVLADelayCal(uv, parms["DCals"], err, \ BChan=parms["delayBChan"], EChan=parms["delayEChan"], \ doCalib=-1, flagVer=0, doBand=doBand, BPVer=BPVer, \ solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"], \ refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], \ doZeroPhs=parms["delayZeroPhs"], \ doAvgIF=parms["delayAvgIF"], doAvgPol=parms["delayAvgPol"], \ doPlot=parms["doSNPlot"], plotFile=plotFile, \ nThreads=nThreads, noScrat=noScrat, \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in delay calibration") # Plot corrected data? if parms["doSpecPlot"] and parms["plotSource"]: plotFile = fileRoot + "_DelaySpec2.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \ Stokes=["RR","LL"], doband=doband, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") # Bandpass calibration if parms["doBPCal2"] and parms["BPCals"]: retCode = KATBPCal(uv, parms["BPCals"], err, doBand=doBand, BPVer=BPVer, newBPVer=0, \ noScrat=noScrat, solInt1=parms["bpsolint1"], \ solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \ BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \ BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \ doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \ UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in Bandpass calibration") # Plot corrected data? 
if parms["doSpecPlot"] and parms["plotSource"]: plotFile = fileRoot + "_BPSpec2.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \ Stokes=["RR","LL"], doband=1, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") # Amp & phase Recalibrate if parms["doAmpPhaseCal2"]: plotFile = fileRoot + "_APCal2.ps" retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], \ doCalib=2, doBand=1, BPVer=0, flagVer=0, \ BChan=parms["ampBChan"], EChan=parms["ampEChan"], \ solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \ doAmpEdit=True, ampSigma=parms["ampSigma"], \ ampEditFG=parms["ampEditFG"], avgPol=parms["PolCal"], \ doPlot=parms["doSNPlot"], plotFile=plotFile, refAnt=parms["refAnt"], \ noScrat=noScrat, nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error calibrating") # More editing if parms["doAutoFlag2"]: mess = "Post recalibration editing:" printMess(mess, logFile) retCode = EVLAAutoFlag (uv, [], err, flagVer=0, flagTab=2, \ doCalib=2, gainUse=0, doBand=1, BPVer=0, \ IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \ doFD=parms["doSecAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \ FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \ FDmaxRes=parms["FDmaxRes"], FDmaxResBL= parms["FDmaxResBL"], \ FDbaseSel=parms["FDbaseSel"], \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in AutoFlag") # end recal # Calibrate and average data # Overwrite avgStokes from command line if kwargs.get('halfstokes'): parms["avgStokes"] = 'HALF' if parms["doCalAvg"] == 'Splat': retCode = KATCalAvg (uv, avgClass, parms["seq"], parms["CalAvgTime"], err, \ flagVer=2, doCalib=2, gainUse=0, doBand=1, BPVer=0, doPol=False, \ avgFreq=parms["avgFreq"], chAvg=parms["chAvg"], Stokes=parms["avgStokes"], \ BChan=1, EChan=parms["selChan"] - 1, doAuto=parms["doAuto"], \ BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in CalAvg") elif parms["doCalAvg"] == 'BL': retCode = KATBLCalAvg (uv, avgClass, parms["seq"], err, \ flagVer=2, doCalib=2, gainUse=0, doBand=1, BPVer=0, doPol=False, \ avgFreq=parms["avgFreq"], chAvg=parms["chAvg"], FOV=parms['FOV'], \ maxInt=min(parms["solPInt"],parms["solAInt"]), Stokes=parms["avgStokes"], \ BChan=1, EChan=parms["selChan"] - 1, timeAvg=parms["CalAvgTime"], \ BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in BLCalAvg") if parms["doSaveTab"]: filename = project + ".CalTab.uvtab" _ = EVLAUVFITSTab(uv, filename, 0, err, logfile=logFile) #Zap unaveraged data if requested if kwargs.get('zapraw'): uv.Zap(err) # Get calibrated/averaged data if not check: uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), avgClass[0:6], \ disk, parms["seq"], True, err) if err.isErr: OErr.printErrMsg(err, "Error creating cal/avg AIPS data") plotFile = fileRoot + "_Spec.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \ plotFile, parms["refAnt"], err, \ Stokes=["I"], doband=-1, docalib=-1, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") # KATUVFITS(uv, 'preimage.uvfits', 0, err, exclude=["AIPS HI", "AIPS 
SL", "AIPS PL"], # include=["AIPS AN", "AIPS FQ"], compress=parms["Compress"], logfile=logFile) KATUVFITab(uv, project + '.uvtab', 0, err) #Gzip the data? if kwargs.get('gzip'): os.system('pigz -p %d %s' % (nThreads, project + '.uvtab')) os.system('rm -f %s' % (project + '.uvtab'))
def MKContPipeline(files, outputdir, **kwargs): """MeerKAT Continuum pipeline. Parameters ---------- files : list h5 filenames (note: support for multiple h5 files i.e. ConcatenatedDataSet is not currently supported) outputdir : string Directory location to write output data, scratchdir : string, optional The directory location of the aips disk parmFile : string, optional Overwrite the default imaging parameters using this parameter file. """ #if len(files) > 1: # raise TooManyKatfilesException('Processing multiple katfiles are not currently supported') # Onle be concatenated if we have to be if len(files) == 1: h5file = files[0] else: h5file = files # Die gracefully if we cannot write to the output area... if not os.path.exists(outputdir): print('Specified output directory: ' + outputdir + 'does not exist.') exit(-1) # Obit error logging err = OErr.OErr() #################### Initialize filenames ####################################################### fileRoot = os.path.join(outputdir, os.path.basename(os.path.splitext( files[0])[0])) # root of file name logFile = fileRoot + ".log" # Processing log file avgClass = ("UVAv")[0:6] # Averaged data AIPS class manifestfile = outputdir + '/manifest.pickle' ############################# Initialize OBIT and AIPS ########################################## noScrat = [] # Logging directly to logFile OErr.PInit(err, 2, logFile) EVLAAddOutFile(os.path.basename(logFile), 'project', 'Pipeline log file') if kwargs.get('reuse'): ObitSys = AIPSSetup.AIPSSetup(err, configfile=kwargs.get('configFile'), scratchdir=kwargs.get('scratchdir'), aipsdisk=kwargs.get('aipsdisk'), overwrite=False) else: ObitSys = AIPSSetup.AIPSSetup(err, configfile=kwargs.get('configFile'), scratchdir=kwargs.get('scratchdir'), aipsdisk=kwargs.get('aipsdisk')) # Get the set up AIPS environment. AIPS_ROOT = os.environ['AIPS_ROOT'] AIPS_VERSION = os.environ['AIPS_VERSION'] nThreads = 72 user = OSystem.PGetAIPSuser() AIPS.userno = user disk = 1 fitsdisk = 0 nam = os.path.basename(os.path.splitext(files[0])[0])[0:10] cls = "Raw" seq = 1 ############################# Initialise Parameters ########################################## ####### Initialize parameters dictionary ##### parms = KATInitContParms() ####### User defined parameters ###### if kwargs.get('parmFile'): print("parmFile", kwargs.get('parmFile')) exec(open(kwargs.get('parmFile')).read()) EVLAAddOutFile(os.path.basename(kwargs.get('parmFile')), 'project', 'Pipeline input parameters') ############### Initialize katfile object, uvfits object and condition data ######################### OK = False # Open the h5 file as a katfile object try: #open katfile and perform selection according to kwargs katdata = katfile.open(h5file) OK = True except Exception as exception: print(exception) if not OK: OErr.PSet(err) OErr.PLog(err, OErr.Fatal, "Unable to read KAT HDF5 data in " + str(h5file)) raise KATUnimageableError("Unable to read KAT HDF5 data in " + str(h5file)) #Are we MeerKAT or KAT-7 telescope = katdata.ants[0].name[0] if telescope == 'm': sefd = 500. else: sefd = 1200. 
#Get calibrator models fluxcals = katpoint.Catalogue( open(FITSDir.FITSdisks[0] + "/" + parms["fluxModel"])) #Condition data (get bpcals, update names for aips conventions etc) KATh5Condition(katdata, fluxcals, err) ###################### Data selection and static edits ############################################ # Select data based on static imageable parameters MKATh5Select(katdata, parms, err, **kwargs) # General AIPS data parameters at script level dataClass = ("UVDa")[0:6] # AIPS class of raw uv data band = katdata.spectral_windows[0].product #Correlator product project = os.path.basename(os.path.splitext(files[0])[0])[ 0:10] # Project name (12 char or less, used as AIPS Name) outIClass = parms["outIClass"] # image AIPS class debug = parms["debug"] check = parms["check"] ####################### Import data into AIPS ##################################################### # Reuse or nay? if kwargs.get('reuse'): uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), dataClass, disk, seq, True, err) obsdata = KATH5toAIPS.GetKATMeta(katdata, err) # Extract AIPS parameters of the uv data to the metadata obsdata["Aproject"] = uv.Aname obsdata["Aclass"] = uv.Aclass obsdata["Aseq"] = uv.Aseq obsdata["Adisk"] = disk obsdata["calInt"] = katdata.dump_period obsdata["fitsdisk"] = fitsdisk # TODO: Check if the input data has been Hanned. doneHann = True else: # Number of baselines gives batch size nbl = len( np.unique([(cp[0][:-1] + cp[1][:-1]).upper() for cp in katdata.corr_products])) # Construct a template uvfits file from master template mastertemplate = ObitTalkUtil.FITSDir.FITSdisks[ fitsdisk] + 'MKATTemplate.uvtab.gz' outtemplate = nam + '.uvtemp' KATH5toAIPS.MakeTemplate(mastertemplate, outtemplate, len(katdata.channel_freqs), nvispio=nbl) uv = OTObit.uvlod(outtemplate, 0, EVLAAIPSName(project), cls, disk, seq, err) obsdata = KATH5toAIPS.KAT2AIPS(katdata, uv, disk, fitsdisk, err, calInt=katdata.dump_period, **kwargs) MakeIFs.UVMakeIF(uv, 8, err) os.remove(outtemplate) # Print the uv data header to screen. uv.Header(err) ############################# Set Project Processing parameters ################################### # Parameters derived from obsdata and katdata MKATGetObsParms(obsdata, katdata, parms, logFile) ###### Initialise target parameters ##### KATInitTargParms(katdata, parms, err) # Load the outputs pickle jar EVLAFetchOutFiles() OSystem.PAllowThreads(nThreads) # Allow threads in Obit/oython retCode = 0 maxgap = max(parms["CalAvgTime"], 20 * katdata.dump_period) / 60. ################### Start processing ############################################################### mess = "Start project "+parms["project"]+" AIPS user no. 
"+str(AIPS.userno)+\ ", KAT7 configuration "+parms["KAT7Cfg"] printMess(mess, logFile) if debug: pydoc.ttypager = pydoc.plainpager # don't page task input displays mess = "Using Debug mode " printMess(mess, logFile) if check: mess = "Only checking script" printMess(mess, logFile) # Log parameters printMess("Parameter settings", logFile) for p in parms: mess = " " + p + ": " + str(parms[p]) printMess(mess, logFile) clist = [] for DCal in parms["DCals"]: if DCal["Source"] not in clist: clist.append(DCal["Source"]) for PCal in parms["PCals"]: if PCal["Source"] not in clist: clist.append(PCal["Source"]) for ACal in parms["ACals"]: if ACal["Source"] not in clist: clist.append(ACal["Source"]) if kwargs.get('targets') is not None: targets = [ targ.name for targ in katdata.catalogue if (targ.name not in clist) and ( targ.name in kwargs.get('targets').split(',')) ] else: targets = [ targ.name for targ in katdata.catalogue if (targ.name not in clist) ] refAnt = FetchObject(fileRoot + ".refAnt.pickle") # Save parameters to pickle jar, manifest ParmsPicklefile = fileRoot + ".Parms.pickle" # Where results saved SaveObject(parms, ParmsPicklefile, True) EVLAAddOutFile(os.path.basename(ParmsPicklefile), 'project', 'Processing parameters used') loadClass = dataClass # Hanning - No Hanning parms["doHann"] = False doneHann = False if doneHann: # Halve channels after hanning. parms["selChan"] = int(parms["selChan"] / 2) parms["BChDrop"] = int(parms["BChDrop"] / 2) parms["EChDrop"] = int(parms["EChDrop"] / 2) if uv == None and not check: raise RuntimeError("Cannot Hann data ") # Clear any old calibration/editing if parms["doClearTab"] or kwargs.get('reuse'): mess = "Clear previous calibration" printMess(mess, logFile) EVLAClearCal(uv, err, doGain=parms["doClearGain"], doFlag=parms["doClearFlag"], doBP=parms["doClearBP"], check=check) OErr.printErrMsg(err, "Error resetting calibration") # Quack to remove data from start and end of each scan if parms["doQuack"]: retCode = EVLAQuack (uv, err, begDrop=parms["quackBegDrop"], endDrop=parms["quackEndDrop"], \ Reason=parms["quackReason"], \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error Quacking data") # Flag antennas shadowed by others? 
if parms["doShad"]: retCode = EVLAShadow (uv, err, shadBl=parms["shadBl"], \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error Shadow flagging data") # Median window time editing, for RFI impulsive in time if parms["doMednTD1"]: mess = "Median window time editing, for RFI impulsive in time:" printMess(mess, logFile) retCode = EVLAMedianFlag (uv, clist, err, noScrat=noScrat, nThreads=nThreads, \ avgTime=parms["mednAvgTime"], avgFreq=parms["mednAvgFreq"], chAvg= parms["mednChAvg"], \ timeWind=parms["mednTimeWind"],flagVer=2, flagTab=2,flagSig=parms["mednSigma"], \ logfile=logFile, check=check, debug=False) if retCode != 0: raise RuntimeError("Error in MednFlag") # Median window frequency editing, for RFI impulsive in frequency if parms["doFD1"]: mess = "Median window frequency editing, for RFI impulsive in frequency:" printMess(mess, logFile) retCode = EVLAAutoFlag (uv, clist, err, flagVer=2, flagTab=2, doCalib=-1, doBand=-1, \ timeAvg=parms["FD1TimeAvg"], \ doFD=True, FDmaxAmp=1.0e20, FDmaxV=1.0e20, FDwidMW=parms["FD1widMW"], \ FDmaxRMS=[1.0e20,0.1], FDmaxRes=parms["FD1maxRes"], \ FDmaxResBL= parms["FD1maxRes"], FDbaseSel=parms["FD1baseSel"],\ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in AutoFlag") # Parallactic angle correction? if parms["doPACor"]: retCode = EVLAPACor(uv, err, \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in Parallactic angle correction") # Need to find a reference antenna? See if we have saved it? if (parms["refAnt"] <= 0): refAnt = FetchObject(fileRoot + ".refAnt.pickle") if refAnt: parms["refAnt"] = refAnt # Use bandpass calibrator and center half of each spectrum if parms["refAnt"] <= 0: mess = "Find best reference antenna: run Calib on BP Cal(s) " printMess(mess, logFile) parms["refAnt"] = EVLAGetRefAnt(uv, parms["BPCals"], err, flagVer=0, \ solInt=parms["bpsolint1"], nThreads=nThreads, \ logfile=logFile, check=check, debug=debug) if err.isErr: raise RuntimeError("Error finding reference antenna") if parms["refAnts"][0] <= 0: parms["refAnts"][0] = parms["refAnt"] mess = "Picked reference antenna " + str(parms["refAnt"]) printMess(mess, logFile) # Save it ParmsPicklefile = fileRoot + ".Parms.pickle" # Where results saved SaveObject(parms, ParmsPicklefile, True) refAntPicklefile = fileRoot + ".refAnt.pickle" # Where results saved SaveObject(parms["refAnt"], refAntPicklefile, True) # Plot Raw, edited data? 
parms["doRawSpecPlot"] = False parms["doSpecPlot"] = False if parms["doRawSpecPlot"] and parms["plotSource"]: mess = "Raw Spectral plot for: " + ' '.join(parms["BPCal"]) printMess(mess, logFile) plotFile = fileRoot + "_RawSpec.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \ Stokes=["RR","LL"], doband=-1, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") EVLAAddOutFile(plotFile, 'project', 'Pipeline log file') # delay calibration if parms["doDelayCal"] and parms["DCals"] and not check: plotFile = fileRoot + "_DelayCal.ps" retCode = EVLADelayCal(uv, parms["DCals"], err, \ BChan=parms["delayBChan"], EChan=parms["delayEChan"], \ doCalib=2, flagVer=0, doBand=-1, \ solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"], \ refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], doZeroPhs=parms["delayZeroPhs"], \ doPlot=parms["doSNPlot"], plotFile=plotFile, \ nThreads=nThreads, noScrat=noScrat, \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in delay calibration") # Plot corrected data? if parms["doSpecPlot"] and parms["plotSource"]: plotFile = fileRoot + "_DelaySpec.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \ plotFile, parms["refAnt"], err, \ Stokes=["RR","LL"], doband=-1, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") print(parms["bpBChan1"], parms["bpEChan1"], parms["bpBChan2"], parms["bpEChan2"], parms["bpChWid2"]) # Bandpass calibration if parms["doBPCal"] and parms["BPCals"]: retCode = KATBPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \ solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \ BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \ BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \ doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \ UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in Bandpass calibration") # Plot corrected data? 
if parms["doSpecPlot"] and parms["plotSource"]: plotFile = fileRoot + "_BPSpec.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, \ parms["refAnt"], err, Stokes=["RR","LL"], doband=2, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") # Amp & phase Calibrate if parms["doAmpPhaseCal"]: plotFile = fileRoot + "_APCal.ps" retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], doCalib=2, doBand=2, BPVer=1, flagVer=0, \ BChan=parms["ampBChan"], EChan=parms["ampEChan"], \ solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \ doAmpEdit=parms["doAmpEdit"], ampSigma=parms["ampSigma"], \ ampEditFG=parms["ampEditFG"], \ doPlot=parms["doSNPlot"], plotFile=plotFile, refAnt=parms["refAnt"], \ nThreads=nThreads, noScrat=noScrat, logfile=logFile, check=check, debug=debug) #print parms["ACals"],parms["PCals"] if retCode != 0: raise RuntimeError("Error calibrating") # More editing if parms["doAutoFlag"]: mess = "Post calibration editing:" printMess(mess, logFile) # if going to redo then only calibrators if parms["doRecal"]: # Only calibrators clist = [] for DCal in parms["DCals"]: if DCal["Source"] not in clist: clist.append(DCal["Source"]) for PCal in parms["PCals"]: if PCal["Source"] not in clist: clist.append(PCal["Source"]) for ACal in parms["ACals"]: if ACal["Source"] not in clist: clist.append(ACal["Source"]) else: clist = [] retCode = EVLAAutoFlag (uv, clist, err, flagVer=0, flagTab =2, \ doCalib=2, gainUse=0, doBand=2, BPVer=1, \ IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \ doFD=parms["doFirstAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \ FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \ FDmaxRes=parms["FDmaxRes"], FDmaxResBL=parms["FDmaxResBL"], \ FDbaseSel=parms["FDbaseSel"], \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in AutoFlag") # Redo the calibration using new flagging? if parms["doBPCal2"] == None: parms["doBPCal2"] = parms["doBPCal"] if parms["doDelayCal2"] == None: parms["doDelayCal2"] = parms["doDelayCal2"] if parms["doAmpPhaseCal2"] == None: parms["doAmpPhaseCal2"] = parms["doAmpPhaseCal"] if parms["doAutoFlag2"] == None: parms["doAutoFlagCal2"] = parms["doAutoFlag"] if parms["doRecal"]: mess = "Redo calibration:" printMess(mess, logFile) EVLAClearCal(uv, err, doGain=True, doFlag=False, doBP=True, check=check, logfile=logFile) OErr.printErrMsg(err, "Error resetting calibration") # Parallactic angle correction? if parms["doPACor"]: retCode = EVLAPACor(uv, err, \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in Parallactic angle correction") # Delay recalibration if parms["doDelayCal2"] and parms["DCals"] and not check: plotFile = fileRoot + "_DelayCal2.ps" retCode = EVLADelayCal(uv, parms["DCals"], err, \ BChan=parms["delayBChan"], EChan=parms["delayEChan"], \ doCalib=2, flagVer=0, doBand=-1, \ solInt=parms["delaySolInt"], smoTime=parms["delaySmoo"], \ refAnts=[parms["refAnt"]], doTwo=parms["doTwo"], \ doZeroPhs=parms["delayZeroPhs"], \ doPlot=parms["doSNPlot"], plotFile=plotFile, \ nThreads=nThreads, noScrat=noScrat, \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in delay calibration") # Plot corrected data? 
if parms["doSpecPlot"] and parms["plotSource"]: plotFile = fileRoot + "_DelaySpec2.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \ Stokes=["RR","LL"], doband=-1, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") # Bandpass calibration if parms["doBPCal2"] and parms["BPCals"]: retCode = KATBPCal(uv, parms["BPCals"], err, noScrat=noScrat, solInt1=parms["bpsolint1"], \ solInt2=parms["bpsolint2"], solMode=parms["bpsolMode"], \ BChan1=parms["bpBChan1"], EChan1=parms["bpEChan1"], \ BChan2=parms["bpBChan2"], EChan2=parms["bpEChan2"], ChWid2=parms["bpChWid2"], \ doCenter1=parms["bpDoCenter1"], refAnt=parms["refAnt"], \ UVRange=parms["bpUVRange"], doCalib=2, gainUse=0, flagVer=0, doPlot=False, \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in Bandpass calibration") # Plot corrected data? if parms["doSpecPlot"] and parms["plotSource"]: plotFile = fileRoot + "_BPSpec2.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, plotFile, parms["refAnt"], err, \ Stokes=["RR","LL"], doband=2, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") # Amp & phase Recalibrate if parms["doAmpPhaseCal2"]: plotFile = fileRoot + "_APCal2.ps" retCode = KATCalAP (uv, [], parms["ACals"], err, PCals=parms["PCals"], \ doCalib=2, doBand=2, BPVer=1, flagVer=0, \ BChan=parms["ampBChan"], EChan=parms["ampEChan"], \ solInt=parms["solInt"], solSmo=parms["solSmo"], ampScalar=parms["ampScalar"], \ doAmpEdit=True, ampSigma=parms["ampSigma"], \ ampEditFG=parms["ampEditFG"], \ doPlot=parms["doSNPlot"], plotFile=plotFile, refAnt=parms["refAnt"], \ noScrat=noScrat, nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error calibrating") # More editing parms["doAutoFlag2"] = False if parms["doAutoFlag2"]: mess = "Post recalibration editing:" printMess(mess, logFile) retCode = EVLAAutoFlag (uv, [], err, flagVer=0, flagTab=2, \ doCalib=2, gainUse=0, doBand=2, BPVer=1, \ IClip=parms["IClip"], minAmp=parms["minAmp"], timeAvg=parms["timeAvg"], \ doFD=parms["doSecAFFD"], FDmaxAmp=parms["FDmaxAmp"], FDmaxV=parms["FDmaxV"], \ FDwidMW=parms["FDwidMW"], FDmaxRMS=parms["FDmaxRMS"], \ FDmaxRes=parms["FDmaxRes"], FDmaxResBL= parms["FDmaxResBL"], \ FDbaseSel=parms["FDbaseSel"], \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in AutoFlag") # end recal # Calibrate and average data # Overwrite avgStokes from command line parms["avgFreq"] = 0 parms["chAvg"] = 1 parms["doCalAvg"] = 'Splat' if kwargs.get('halfstokes'): parms["avgStokes"] = 'HALF' if parms["doCalAvg"] == 'Splat': retCode = KATCalAvg (uv, avgClass, parms["seq"], parms["CalAvgTime"], err, \ flagVer=2, doCalib=2, gainUse=0, doBand=2, BPVer=1, doPol=False, \ avgFreq=parms["avgFreq"], chAvg=parms["chAvg"], Stokes=parms["avgStokes"], \ BChan=1, EChan=parms["selChan"] - 1, doAuto=parms["doAuto"], \ BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \ nThreads=nThreads, logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in CalAvg") elif parms["doCalAvg"] == 'BL': retCode = KATBLCalAvg (uv, avgClass, parms["seq"], err, \ flagVer=2, doCalib=2, gainUse=0, doBand=2, BPVer=1, doPol=False, \ avgFreq=parms["avgFreq"], chAvg=parms["chAvg"], FOV=parms['FOV'], \ 
maxInt=min(parms["solPInt"],parms["solAInt"]), Stokes=parms["avgStokes"], \ BChan=1, EChan=parms["selChan"] - 1, timeAvg=parms["CalAvgTime"], \ BIF=parms["CABIF"], EIF=parms["CAEIF"], Compress=parms["Compress"], \ logfile=logFile, check=check, debug=debug) if retCode != 0: raise RuntimeError("Error in BLCalAvg") if parms["doSaveTab"]: filename = project + ".CalTab.uvtab" _ = EVLAUVFITSTab(uv, filename, 0, err, logfile=logFile) #Zap unaveraged data if requested if kwargs.get('zapraw'): uv.Zap(err) # Get calibrated/averaged data if not check: uv = UV.newPAUV("AIPS UV DATA", EVLAAIPSName(project), avgClass[0:6], \ disk, parms["seq"], True, err) if err.isErr: OErr.printErrMsg(err, "Error creating cal/avg AIPS data") plotFile = fileRoot + "_Spec.ps" retCode = EVLASpectrum(uv, parms["BPCal"], parms["plotTime"], maxgap, \ plotFile, parms["refAnt"], err, \ Stokes=["I"], doband=-1, docalib=-1, \ check=check, debug=debug, logfile=logFile ) if retCode != 0: raise RuntimeError("Error in Plotting spectrum") # KATUVFITS(uv, 'preimage.uvfits', 0, err, exclude=["AIPS HI", "AIPS SL", "AIPS PL"], # include=["AIPS AN", "AIPS FQ"], compress=parms["Compress"], logfile=logFile) KATUVFITab(uv, project + '.uvtab', 0, err) #Gzip the data? if kwargs.get('gzip'): os.system('pigz -p %d %s' % (nThreads, project + '.uvtab')) os.system('rm -f %s' % (project + '.uvtab'))
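
# A minimal sketch (not part of the pipeline above) of how a later run could reuse the
# pickled results instead of recomputing them. It assumes the same fileRoot naming and
# the FetchObject/SaveObject helpers used above, and that FetchObject returns a falsy
# value when the pickle file does not exist yet, as the refAnt handling above implies.
savedParms = FetchObject(fileRoot + ".Parms.pickle")
if savedParms:
    parms.update(savedParms)           # reuse previous parameter settings
savedRefAnt = FetchObject(fileRoot + ".refAnt.pickle")
if savedRefAnt:
    parms["refAnt"] = savedRefAnt      # reuse previously chosen reference antenna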
if float(cycle_length) > 0:
    period = float(cycle_length)
    freq = 1.0 / period
    print "kat.ptuse_1.req.ptuse_cal_freq (" + data_product_id + ", " + beam_id + ", " + str(freq) + ")"
    reply = kat.ptuse_1.req.ptuse_cal_freq(data_product_id, beam_id, freq)
    print "kat.ptuse_1.req.ptuse_cal_freq returned " + str(reply)

# Temporary hack to make sure that the AP accepts the upcoming track request
time.sleep(2)

if opts.cal == 'flux':
    timenow = katpoint.Timestamp()
    sources = katpoint.Catalogue(add_specials=False)
    user_logger.info('Performing flux calibration')
    ra, dec = target.apparent_radec(timestamp=timenow)
    targetName = target.name.replace(" ", "")
    print targetName
    target.name = targetName + '_O'
    sources.add(target)

if opts.cal == 'fluxN':
    timenow = katpoint.Timestamp()
    sources = katpoint.Catalogue(add_specials=False)
    user_logger.info('Performing flux calibration')
    ra, dec = target.apparent_radec(timestamp=timenow)
    print target
    print "ra %f ,dec %f" % (katpoint.rad2deg(ra), katpoint.rad2deg(dec))
opts, args = parser.parse_args()

if len(args) == 0:
    raise ValueError("Please specify at least one target argument via name ('Cygnus A'), "
                     "description ('azel, 20, 30') or catalogue file name ('sources.csv')")

# Check options and build KAT configuration, connecting to proxies and devices
with verify_and_connect(opts) as kat:
    args_target_list = []
    if not kat.dry_run and kat.ants.req.mode('STOP'):
        user_logger.info("Setting Antenna Mode to 'STOP', Powering on Antenna Drives.")
    else:
        user_logger.error("Unable to set Antenna mode to 'STOP'.")
    observation_sources = katpoint.Catalogue(antenna=kat.sources.antenna)
    for catfile in args:
        try:
            observation_sources.add_tle(file(catfile))
        except IOError:
            # If the file failed to load, assume it is a target string
            args_target_list.append(catfile)
    if len(args_target_list) > 0:
        args_target_obj = collect_targets(kat, args_target_list)
        observation_sources.add(args_target_obj)
        # user_logger.info("Found %d targets from Command line and %d targets from %d Catalogue(s) " %
        #                  (len(args_target_obj), num_catalogue_targets, len(args) - len(args_target_list),))
    # Quit early if there are no sources to observe
    if len(observation_sources.filter(el_limit_deg=opts.horizon)) == 0:
        user_logger.warning("No targets are currently visible - please re-run the script later")
    else:
        # Start capture session, which creates HDF5 file
def test_gains_export(self):
    """Check L2 export to telstate."""
    nchan = 128
    nif = 4
    dump_period = 1.0
    centre_freq = 1200.e6
    bandwidth = 100.e6
    solPint = dump_period / 2.
    solAint = dump_period
    AP_telstate = 'product_GAMP_PHASE'
    P_telstate = 'product_GPHASE'

    spws = [{'centre_freq': centre_freq,
             'num_chans': nchan,
             'channel_width': bandwidth / nchan,
             'sideband': 1,
             'band': 'L'}]
    ka_select = {'pol': 'HH,VV', 'scans': 'track',
                 'corrprods': 'cross', 'nif': nif}
    uvblavg_params = {'maxFact': 1.0, 'avgFreq': 0,
                      'FOV': 100.0, 'maxInt': 1.e-6}
    mfimage_params = {'Niter': 50, 'FOV': 0.1,
                      'xCells': 5., 'yCells': 5.,
                      'doGPU': False, 'Robust': -1.5,
                      'minFluxPSC': 0.1, 'solPInt': solPint / 60.,
                      'solPMode': 'P', 'minFluxASC': 0.1,
                      'solAInt': solAint / 60., 'maxFBW': 0.02}

    # Simulate a '10Jy' source at the phase center
    cat = katpoint.Catalogue()
    cat.add(katpoint.Target(
        "Alberich lord of the Nibelungs, radec, 20.0, -30.0, (856. 1712. 1. 0. 0.)"))

    telstate = TelescopeState()

    # Set up a scratch space in /tmp
    fd = kc.get_config()['fitsdirs']
    fd += [(None, '/tmp/FITS')]
    kc.set_config(cb_id='CBID', fitsdirs=fd)

    setup_aips_disks()

    scan = [('track', 4, cat.targets[0])]

    # Construct a simulated dataset with our
    # point source at the centre of the field
    ds = MockDataSet(timestamps={'start_time': 0.0, 'dump_period': dump_period},
                     subarrays=DEFAULT_SUBARRAYS,
                     spws=spws,
                     dumps=scan,
                     vis=partial(vis, sources=cat),
                     weights=weights,
                     flags=flags)

    # Try one round of phase-only self-cal & Amp+Phase self-cal
    mfimage_params['maxPSCLoop'] = 1
    mfimage_params['maxASCLoop'] = 1

    # Run the pipeline
    pipeline = pipeline_factory('online', ds, telstate, katdal_select=ka_select,
                                uvblavg_params=uvblavg_params, mfimage_params=mfimage_params)
    pipeline.execute()

    ts = telstate.view('selfcal')
    # Check what we have in telstate agrees with what we put in
    self.assertEqual(len(ts['antlist']), len(ANTENNA_DESCRIPTIONS))
    self.assertEqual(ts['bandwidth'], bandwidth)
    self.assertEqual(ts['n_chans'], nif)
    pol_ordering = [pol[0] for pol in sorted(CORR_ID_MAP, key=CORR_ID_MAP.get)
                    if pol[0] == pol[1]]
    self.assertEqual(ts['pol_ordering'], pol_ordering)
    if_width = bandwidth / nif
    center_if = nif // 2
    start_freq = centre_freq - (bandwidth / 2.)
    self.assertEqual(ts['center_freq'], start_freq + if_width * (center_if + 0.5))

    self.assertIn(ts.join('selfcal', P_telstate), ts.keys())
    self.assertIn(ts.join('selfcal', AP_telstate), ts.keys())

    def check_gains_timestamps(gains, expect_timestamps):
        timestamps = []
        for gain, timestamp in gains:
            np.testing.assert_array_almost_equal(np.abs(gain), 1.0, decimal=3)
            np.testing.assert_array_almost_equal(np.angle(gain), 0.0)
            timestamps.append(timestamp)
        np.testing.assert_array_almost_equal(timestamps, expect_timestamps, decimal=1)

    # Check phase-only gains and timestamps
    P_times = np.arange(solPint, ds.end_time.secs, 2. * solPint)
    check_gains_timestamps(ts.get_range(P_telstate, st=0), P_times)
    # Check Amp+Phase gains
    AP_times = np.arange(solAint, ds.end_time.secs, 2. * solAint)
    check_gains_timestamps(ts.get_range(AP_telstate, st=0), AP_times)

    # Check with no Amp+Phase self-cal
    mfimage_params['maxASCLoop'] = 0
    telstate.clear()
    pipeline = pipeline_factory('online', ds, telstate, katdal_select=ka_select,
                                uvblavg_params=uvblavg_params, mfimage_params=mfimage_params)
    pipeline.execute()
    self.assertIn(telstate.join('selfcal', P_telstate), ts.keys())
    self.assertNotIn(telstate.join('selfcal', AP_telstate), ts.keys())

    # Check with no self-cal
    mfimage_params['maxPSCLoop'] = 0
    telstate.clear()
    pipeline = pipeline_factory('online', ds, telstate, katdal_select=ka_select,
                                uvblavg_params=uvblavg_params, mfimage_params=mfimage_params)
    pipeline.execute()
    self.assertNotIn(telstate.join('selfcal', P_telstate), ts.keys())
    self.assertNotIn(telstate.join('selfcal', AP_telstate), ts.keys())

    # Cleanup workspace
    shutil.rmtree(fd[-1][1])
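
# A minimal sketch (not part of the test above) of how a downstream consumer might read
# the exported phase-only self-cal gains back out of telstate. It reuses the same view
# and key name as the test ('selfcal' / 'product_GPHASE') and the get_range() call shown
# above; the helper name and default argument are hypothetical.
def latest_selfcal_phase_gain(telstate, key='product_GPHASE'):
    """Return the most recent (gain, timestamp) pair for `key`, or None if absent."""
    ts = telstate.view('selfcal')
    if key not in ts:
        return None
    history = ts.get_range(key, st=0)   # full history as (value, timestamp) tuples
    return history[-1] if history else None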
def test_skip_empty(self):
    """Catalogue construction should skip empty and comment-only lines."""
    cat = katpoint.Catalogue(['', '# comment', ' ', '\t\r '])
    self.assertEqual(len(cat), 0)
#
# Ludwig Schwardt
# 7 October 2011
#

import numpy as np
from scikits.fitting import PiecewisePolynomial1DFit
import katpoint
from astropy.table import Table

# Load tables in one shot (don't verify, as the VizieR VOTables contain a deprecated DEFINITIONS element)
table = Table.read('tabara.vot')
pol_table = Table.read('tabara_pol.vot')

# Use Kuehr 1Jy catalogue to provide flux density models
flux_cat = katpoint.Catalogue(open('kuehr1Jy_source_list.csv'))
# Use ATCA calibrator list to provide positions (and as a first-level check of source structure)
atca_cat = katpoint.Catalogue(open('atca_calibrators.csv'))

#
# SELECTION CRITERIA
#

# Select sources with > 1 Jy flux at 6 cm (hopefully matching the 1Jy catalogue)
# This also includes flux = NaN, which indicates unknown flux density for the source
flux_limit = 1.0
# Select sources south of +5 degrees declination
dec_limit = 5.0
# Select sources with absolute rotation measure less than 30 rad/m^2
# (allows a Q/U model that is reasonably independent of frequency over the KAT-7 band)
# This also includes RM = 0, which indicates unknown RM
rm_limit = 30.0
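
# A minimal sketch of how the three limits above could be applied to the Tabara table as
# boolean masks. The column names used here ('S6' for the 6 cm flux density in Jy,
# '_DE' for declination in degrees, 'RM' for rotation measure in rad/m^2) are hypothetical
# placeholders - check the actual VOTable column names before using this.
flux_6cm = np.asarray(table['S6'], dtype=float)    # hypothetical column name
dec_deg = np.asarray(table['_DE'], dtype=float)    # hypothetical column name
rm = np.asarray(table['RM'], dtype=float)          # hypothetical column name

# Unknown values are deliberately kept: NaN flux passes the flux cut and RM = 0 passes the RM cut
keep = ((np.isnan(flux_6cm) | (flux_6cm > flux_limit)) &
        (dec_deg < dec_limit) &
        (np.abs(rm) < rm_limit))
selected = table[keep]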