def load_data(fname):
    """Load a dataset and collect a summary of its metadata.

    Parameters
    ----------
    fname : str
        Path of the dataset file to open.

    Returns
    -------
    dict
        Keys: 'metadata' (list of formatted summary strings), 'targets'
        (list of target names), 'fileopened' (the open dataset object,
        left open for the caller), 'ants' (antenna names), 'freqs'
        (channel frequencies), 'chans' (channel indices), 'tstamps'
        (timestamps).

    Raises
    ------
    SystemExit
        If the file cannot be opened (IOError/TypeError/NameError).
    """
    # Exception, catching all possible command-line errors (IOError, TypeError, NameError)
    try:
        # NOTE(review): attributes used below (observer, ants, catalogue, ...)
        # imply a katfile dataset -- presumably katfile.open is in scope as
        # 'open' here; confirm against the file's imports.
        f = open(fname)
    except (IOError, TypeError, NameError) as e:
        raise SystemExit(e)

    # One-line, human-readable summaries of the observation.
    observer = 'Observer: %s' % (f.observer,)  # fixed 'Obsever' typo
    name = 'Filename: %s' % os.path.basename(f.name)
    fsize = 'Filesize: %.2f %s' % (f.size * 1.0e-9, 'GB')  # bytes -> GB
    description = 'Description: %s' % (f.description,)
    centre_freq = 'Centre Freq [MHz]: %s' % (f.spectral_windows[0].centre_freq * 1e-6)
    dump = 'Dump Period: %0.4f' % f.dump_period
    start_time = 'Start Time: %s' % (f.start_time,)
    end_time = 'End Time: %s' % (f.end_time,)
    targets = ['%s' % (target.name,) for target in f.catalogue.targets]
    ants = [ant.name for ant in f.ants]
    freqs = f.channel_freqs
    chans = f.channels
    tstamps = f.timestamps[:]
    meta_data = [observer, name, fsize, description, centre_freq, dump,
                 start_time, end_time]

    # The dataset object is returned open so callers can keep reading it.
    return {'metadata': meta_data, 'targets': targets, 'fileopened': f,
            'ants': ants, 'freqs': freqs, 'chans': chans, 'tstamps': tstamps}
예제 #2
0
def load_data(fname):
    """Load a dataset and collect a summary of its metadata.

    Parameters
    ----------
    fname : str
        Path of the dataset file to open.

    Returns
    -------
    dict
        Keys: 'metadata' (list of formatted summary strings), 'targets'
        (list of target names), 'fileopened' (the open dataset object,
        left open for the caller), 'ants' (antenna names), 'freqs'
        (channel frequencies), 'chans' (channel indices), 'tstamps'
        (timestamps).

    Raises
    ------
    SystemExit
        If the file cannot be opened (IOError/TypeError/NameError).
    """
    # Exception, catching all possible command-line errors (IOError, TypeError, NameError)
    try:
        # NOTE(review): attributes used below (observer, ants, catalogue, ...)
        # imply a katfile dataset -- presumably katfile.open is in scope as
        # 'open' here; confirm against the file's imports.
        f = open(fname)
    except (IOError, TypeError, NameError) as e:
        raise SystemExit(e)

    # One-line, human-readable summaries of the observation.
    observer = 'Observer: %s' % (f.observer,)  # fixed 'Obsever' typo
    name = 'Filename: %s' % os.path.basename(f.name)
    fsize = 'Filesize: %.2f %s' % (f.size * 1.0e-9, 'GB')  # bytes -> GB
    description = 'Description: %s' % (f.description,)
    centre_freq = ('Centre Freq [MHz]: %s' %
                   (f.spectral_windows[0].centre_freq * 1e-6))
    dump = 'Dump Period: %0.4f' % f.dump_period
    start_time = 'Start Time: %s' % (f.start_time,)
    end_time = 'End Time: %s' % (f.end_time,)
    targets = ['%s' % (target.name,) for target in f.catalogue.targets]
    ants = [ant.name for ant in f.ants]
    freqs = f.channel_freqs
    chans = f.channels
    tstamps = f.timestamps[:]
    meta_data = [
        observer, name, fsize, description, centre_freq, dump, start_time,
        end_time
    ]

    # The dataset object is returned open so callers can keep reading it.
    return {
        'metadata': meta_data,
        'targets': targets,
        'fileopened': f,
        'ants': ants,
        'freqs': freqs,
        'chans': chans,
        'tstamps': tstamps
    }
# Command-line options for the horizon-plot script (Python 2 syntax;
# 'parser', 'sys', 'np' and 'katfile' are set up earlier in the file).
parser.add_option('-s', '--split', dest='split', action="store_true", metavar='SPLIT', default=False,help="Whether to split each horizon plot in half")
parser.add_option('-p', '--pol', dest = 'pol',type ='string',metavar ='POLARIZATION', default = 'HH',help = 'Polarization to load (e.g. HH for horizontal polarization ),\
                the default is the horizontal polarization')
    
(opts, args) = parser.parse_args()
# Check arguments
if len(args) < 1:
    print 'Please specify the data file to reduce'
    sys.exit(1)

# Defensive check (note: --pol defaults to 'HH', so this normally passes).
if opts.pol is None:
    print "please specify which polarization to load"
    sys.exit(1)

print 'Loading baseline', opts.baseline+'-'+opts.pol, 'from data file', args[0]
f = katfile.open(args[0])
# Keep only the chosen baseline/polarisation, 'scan' dumps, and a fixed
# channel range (90-424); NOTE(review): the channel range is hard-coded --
# presumably chosen to avoid band edges, confirm for other instruments.
f.select(ants=opts.baseline, pol=opts.pol, scans='scan', channels=range(90,425))
# Extract azimuth and elevation angle from (azel), in degrees
azimuth = f.az
elevation = f.el
# Visibility magnitudes, converted to dB and summed over the channel axis.
power_linear = np.abs(f.vis[:])
power_db = 10.0 * np.log10(power_linear).sum(axis=1)
# One (az, el, power) sample per selected dump.
assert len(azimuth) == len(elevation) == len(power_db)
print "Contour plotting horizon from %d points ..." % len(azimuth)
    # Calculate and plot tipping curves
    #plt.figure(1)
    #plt.clf()
    #plt.subplots_adjust(hspace=0.5)
data = (azimuth, elevation, power_db)
titles = ('Azimuth (deg)', 'Elevation (deg)', 'Power (dB) for %s %s' % (opts.baseline,opts.pol))
# NOTE(review): multiplying both ends by 0.95 shrinks the range toward zero;
# for negative azimuth minima this narrows rather than pads -- confirm intent.
az_min, az_max = 0.95*min(azimuth), 0.95*max(azimuth)
예제 #4
0
#!/usr/bin/env python
# Quick-look summary of a KAT HDF5 observation file (Python 2 print syntax).
import katfile
import sys

# HDF5 file to summarise: first command-line argument.
filename = sys.argv[1]
# Open with ant1 as the reference antenna.
h5 = katfile.open(filename, refant="ant1")
# DBE auto-delay sensor; NOTE(review): x.events[1] is printed as the number
# of autodelays -- verify that index against the sensor-data API.
x = h5.sensor.get("DBE/auto-delay")
print "number of autodelays", x.events[1]
print "min", min(x), "max", max(x)
print "targets", h5.catalogue.targets
print "antennas", h5.inputs
print "min elevation", h5.el.min()
print "max elevation", h5.el.max()
print "dump period", h5.dump_period
# Frequency band in GHz; duration in hours (time difference presumably in
# seconds -- confirm katpoint Timestamp subtraction semantics).
print "band", h5.channel_freqs.min() / 1e9, "to", h5.channel_freqs.max() / 1e9, "GHz"
print "start", h5.start_time.local(), "end", h5.end_time.local(), "duration", (
    h5.end_time - h5.start_time
) / 3600, "hrs"
# Interactive/debug overrides kept for reference; normally 'opts' and 'args'
# come from the command-line parser defined earlier in the file.
# opts.pol = 'H'
# opts.ref_ant = 'ant2'
# opts.max_sigma = 0.2
# opts.time_offset = 0.0
# opts.exclude = ''
# import glob
# args = sorted(glob.glob('*.h5'))
# args = ['1315991422.h5']

if len(args) < 1:
    raise RuntimeError('Please specify HDF5 data file(s) to use as arguments of the script')

# Suppress katpoint messages below level 30 (== logging.WARNING).
katpoint.logger.setLevel(30)

print "\nLoading and processing data...\n"
data = katfile.open(args, ref_ant=opts.ref_ant, time_offset=opts.time_offset)

# Select frequency channel range and only keep cross-correlation products and single pol in data set
if opts.freq_chans is not None:
    # User supplied 'first,last' channel pair; slice is inclusive of both.
    freq_chans = [int(chan_str) for chan_str in opts.freq_chans.split(',')]
    first_chan, last_chan = freq_chans[0], freq_chans[1]
    chan_range = slice(first_chan, last_chan + 1)
else:
    # Default: keep the middle half of the band.
    chan_range = slice(data.shape[1] // 4, 3 * data.shape[1] // 4)
active_pol = opts.pol.lower()
data.select(channels=chan_range, corrprods='cross', pol=active_pol)
if opts.ants is not None:
    data.select(ants=opts.ants, reset='')
# Index of the reference antenna within the selected antenna list.
ref_ant_ind = [ant.name for ant in data.ants].index(data.ref_ant)
# Per correlation product: the pair of input indices, and a readable
# baseline label built by stripping the trailing polarisation character.
baseline_inds = [(data.inputs.index(inpA), data.inputs.index(inpB)) for inpA, inpB in data.corr_products]
baseline_names = [('%s - %s' % (inpA[:-1], inpB[:-1])) for inpA, inpB in data.corr_products]
예제 #6
0
        session.raster_scan(target, num_scans=3, scan_duration=15, scan_extent=5.0, scan_spacing=0.5)
    if not kat.dry_run:
        # Wait until desired HDF5 file appears in the archive (this could take quite a while...)
        if not session.output_file:
            raise RuntimeError('Could not obtain name of HDF5 file that was recorded')
        user_logger.info("Waiting for HDF5 file '%s' to appear in archive" % (session.output_file,))
        h5file = session.get_archived_product(download_dir=os.path.abspath(os.path.curdir))
        if not os.path.isfile(h5file):
            raise RuntimeError("Could not download '%s' to %d" % (h5file, os.path.abspath(download_dir)))

# Reduce the freshly recorded file (skipped entirely in dry-run mode).
if not kat.dry_run:
    cfg = kat.system

    # Obtain list of antennas and polarisations present in data set
    user_logger.info('Loading HDF5 file into scape and reducing the data')
    h5 = katfile.open(h5file)
    # Iterate through antennas
    for ant in h5.ants:
        # Antenna names are of the form 'antN'; extract the number N.
        ant_num = int(ant.name[3:])
        # Load file and do standard processing; 'A%dA%d' selects the
        # autocorrelation baseline of this antenna with itself.
        d = scape.DataSet(h5file, baseline='A%dA%d' % (ant_num, ant_num))
        d = d.select(freqkeep=range(start_freq_channel, end_freq_channel + 1))
        channel_freqs = d.freqs
        d.convert_power_to_temperature()
        d = d.select(labelkeep='scan', copy=False)
        d.average()
        # Only use the first compound scan for fitting beam and baseline
        compscan = d.compscans[0]
        # Calculate average target flux over entire band; flux_density may
        # return a false/None value outside the model's range, so only
        # truthy values contribute to the mean.
        flux_spectrum = [compscan.target.flux_density(freq) for freq in channel_freqs]
        average_flux = np.mean([flux for flux in flux_spectrum if flux])
예제 #7
0
)
# Threshold option for the jump-detection check (Python 2 script; 'parser',
# 'opts', 'sys' and 'katfile' are set up earlier in the file).
parser.add_option(
    '-s',
    '--significance',
    type='float',
    dest='jump_significance',
    default=10.,
    help=
    "Keep jumps that are bigger than margin by this factor (default %default)")

(opts, args) = parser.parse_args()
if len(args) < 1:
    print 'Please specify an HDF5 file to check'
    sys.exit(1)

data = katfile.open(args[0])
# Channel range: user-supplied 'first,last' (exclusive upper bound here --
# NOTE(review): unlike other scripts in this file, last_chan is NOT +1'd),
# otherwise the middle half of the band.
chan_range = slice(*[int(chan_str) for chan_str in opts.freq_chans.split(',')]) \
             if opts.freq_chans is not None else slice(data.shape[1] // 4, 3 * data.shape[1] // 4)
data.select(channels=chan_range)

# Number of real normal variables squared and added together
dof = 2 * data.shape[1] * data.channel_width * data.dump_period
# Map from correlation-product tuple to its row index in the data.
corrprod_to_index = dict([
    (tuple(cp), ind)
    for cp, ind in zip(data.corr_products, range(len(data.corr_products)))
])

# Accumulates per-antenna jump statistics (filled in the loop below).
offset_stats = {}
print 'Individual firings: timestamp | offset +/- uncertainty (magnitude of jump)'
print '--------------------------------------------------------------------------'
for ant in data.ants:
예제 #8
0
#!/usr/bin/env python
# Print antenna ENU positions from a KAT HDF5 file (Python 2 print syntax).
import katfile
import sys
# HDF5 file to inspect: first command-line argument.
filename=sys.argv[1]
# Open with ant1 as the reference antenna.
h5=katfile.open(filename,refant='ant1')
for ant in h5.ants:
    # Same position in three representations: Earth-centred (ECEF),
    # geodetic (WGS84) and local East-North-Up (ENU).
    ecef=ant.position_ecef
    wgs =ant.ref_position_wgs84
    enu =ant.position_enu
    #print "ECEF values", ecef
    #print "WGS84 values",wgs
    # ENU offsets (presumably metres, relative to the array reference --
    # confirm against the katpoint Antenna docs).
    print "ENU values %6.3f, %6.3f, %6.3f" %(enu)
예제 #9
0
# Interactive/debug overrides kept for reference; normally 'opts' and 'args'
# come from the command-line parser defined earlier in the file.
# opts.pol = 'H'
# opts.ref_ant = 'ant2'
# opts.max_sigma = 0.2
# opts.time_offset = 0.0
# opts.exclude = ''
# import glob
# args = sorted(glob.glob('*.h5'))
# args = ['1315991422.h5']

if len(args) < 1:
    raise RuntimeError('Please specify HDF5 data file(s) to use as arguments of the script')

# Suppress katpoint messages below level 30 (== logging.WARNING).
katpoint.logger.setLevel(30)

print "\nLoading and processing data...\n"
data = katfile.open(args, ref_ant=opts.ref_ant, time_offset=opts.time_offset)

# Select frequency channel range and only keep cross-correlation products and single pol in data set
if opts.freq_chans is not None:
    # User supplied 'first,last' channel pair; slice is inclusive of both.
    freq_chans = [int(chan_str) for chan_str in opts.freq_chans.split(',')]
    first_chan, last_chan = freq_chans[0], freq_chans[1]
    chan_range = slice(first_chan, last_chan + 1)
else:
    # Default: keep the middle half of the band.
    chan_range = slice(data.shape[1] // 4, 3 * data.shape[1] // 4)
active_pol = opts.pol.lower()
data.select(channels=chan_range, corrprods='cross', pol=active_pol)
if opts.ants is not None:
    data.select(ants=opts.ants, reset='')
# Index of the reference antenna within the selected antenna list.
ref_ant_ind = [ant.name for ant in data.ants].index(data.ref_ant)
# Per correlation product: the pair of input indices, and a readable
# baseline label built by stripping the trailing polarisation character.
baseline_inds = [(data.inputs.index(inpA), data.inputs.index(inpB)) for inpA, inpB in data.corr_products]
baseline_names = [('%s - %s' % (inpA[:-1], inpB[:-1])) for inpA, inpB in data.corr_products]