Example no. 1
def load_moonscan(filename):
    cal_coords = ephem.Equatorial("05:42:36.155", "+49:51:07.28",
                                  epoch=ephem.B1950)

    # convert cal to a Body object.
    cal_source = ephem.FixedBody()
    cal_source._ra = cal_coords.ra
    cal_source._dec = cal_coords.dec
    cal_source._epoch = cal_coords.epoch

    Reader = fitsGBT.Reader(filename)
    moon_dataobj = Reader.read(0,0)

    rotate_pol.rotate(moon_dataobj, (-5, -7, -8, -6))
    cal_scale.scale_by_cal(moon_dataobj, scale_t_ave=True, scale_f_ave=False,
                           sub_med=False, scale_f_ave_mod=False, rotate=True)
    flag_data.flag_data(moon_dataobj, 5, 0.1, 40)
    rebin_freq.rebin(moon_dataobj, 16, True, True)
    #rebin_time.rebin(moon_dataobj, 4)

    fgc_mueler_file = '/mnt/raid-project/gmrt/tcv/diff_gain_params/GBT12A_418/22_diff_gain_calc.txt'
    fgc_RM_file = ' '
    fgc_R_to_sky = True
    fgc_DP_correct = False  # this is already handled in scale_by_cal's rotate
    fgc_RM_correct = False

    from time_stream import flux_diff_gain_cal as fdg
    m_total = fdg.flux_dg(fgc_mueler_file)
    fdg.calibrate_pol(moon_dataobj, m_total, fgc_RM_file,
                      fgc_R_to_sky, fgc_DP_correct, fgc_RM_correct)

    return moon_dataobj
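
A minimal usage sketch for load_moonscan follows. The file path is a placeholder, and the printed attributes (dims, data) are assumptions taken from how the data blocks are used in the other examples on this page; this is not part of the original pipeline.

# Hypothetical usage sketch; the path below is a placeholder, not a real file.
moon_data = load_moonscan("/path/to/moon_scan.fits")
# The returned block is assumed to expose `dims` and `data`, as the blocks in
# the other examples do (e.g. Blocks[0].dims[0], Data.data[:, :, 0, :]).
print(moon_data.dims)
print(moon_data.data.shape)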
Example no. 2
 def test_subtracts_baseline(self) :
     rebin_freq.rebin(self.Data, 1.0)
     cal_scale.scale_by_cal(self.Data, sub_med=True)
     data = self.Data.data
     self.assertTrue(ma.allclose(ma.median(data, 0), 0.))
     # The following fails if the rebin line is removed, but only in the 7th
     # digit.  NumPy must be using single precision somewhere.
     #self.assertAlmostEqual(ma.median(data[:,0,0,753]), 0.)
     self.assertAlmostEqual(ma.median(data), 0.)
Example no. 3
    def test_fave_scale(self) :
        hanning.hanning_smooth(self.Data)
        rebin_freq.rebin(self.Data, 2.)
        cal_scale.scale_by_cal(self.Data, False, True, False)
        data = self.Data.data

        self.assertTrue(ma.allclose(ma.mean(data[:,0,0,:] -
                                              data[:,0,1,:], -1), 1.0))
        self.assertTrue(ma.allclose(ma.mean(data[:,3,0,:] -
                                              data[:,3,1,:], -1), 1.0))
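
The two assertions above pin down what the frequency-averaged scaling is expected to do: after scaling, the cal-on minus cal-off difference averages to one over frequency for each time sample and polarization. Below is a minimal NumPy sketch of that relation, assuming the (time, pol, cal on/off, freq) axis order implied by the indexing in these tests; it is not the library's actual scale_by_cal implementation.

import numpy as np

# Toy data with the (time, pol, cal on/off, freq) layout used in these tests.
rng = np.random.default_rng(0)
data = rng.normal(10.0, 1.0, size=(50, 4, 2, 32))
data[:, :, 0, :] += 2.0  # cal-on exceeds cal-off by roughly the cal power

# Divide each time sample by its frequency-averaged cal power (on minus off),
# so that the frequency mean of on-minus-off becomes exactly 1 afterwards,
# which is the relation the assertions above check.
cal_power = np.mean(data[:, :, 0, :] - data[:, :, 1, :], axis=-1)  # (time, pol)
data /= cal_power[:, :, None, None]

print(np.allclose(np.mean(data[:, 0, 0, :] - data[:, 0, 1, :], -1), 1.0))  # True
print(np.allclose(np.mean(data[:, 3, 0, :] - data[:, 3, 1, :], -1), 1.0))  # True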
Example no. 4
def load_moonscan(filename, rotate_moon=True):
    cal_coords = ephem.Equatorial("05:42:36.155",
                                  "+49:51:07.28",
                                  epoch=ephem.B1950)

    # convert cal to a Body object.
    cal_source = ephem.FixedBody()
    cal_source._ra = cal_coords.ra
    cal_source._dec = cal_coords.dec
    cal_source._epoch = cal_coords.epoch

    Reader = fitsGBT.Reader(filename)
    moon_dataobj = Reader.read(0, 0)

    rotate_pol.rotate(moon_dataobj, (-5, -7, -8, -6))
    cal_scale.scale_by_cal(moon_dataobj,
                           scale_t_ave=True,
                           scale_f_ave=False,
                           sub_med=False,
                           scale_f_ave_mod=False,
                           rotate=True)
    flag_data.flag_data(moon_dataobj, 5, 0.1, 40)
    rebin_freq.rebin(moon_dataobj, 16, True, True)
    #rebin_time.rebin(moon_dataobj, 4)

    if rotate_moon:
        moon_rotation.rotate_pol_moon(moon_dataobj)

    fgc_mueler_file = '/mnt/raid-project/gmrt/tcv/diff_gain_params/GBT12A_418/22_diff_gain_calc.txt'
    fgc_RM_file = ' '
    fgc_R_to_sky = True
    fgc_DP_correct = False  # this is already handled in scale_by_cal's rotate
    fgc_RM_correct = False

    from time_stream import flux_diff_gain_cal as fdg
    m_total = fdg.flux_dg(fgc_mueler_file)
    fdg.calibrate_pol(moon_dataobj, m_total, fgc_RM_file, fgc_R_to_sky,
                      fgc_DP_correct, fgc_RM_correct)

    return moon_dataobj
Example no. 5
Blocks = []

for fname in fnames:
    # Read.
    fpath = root + fname
    Reader = fitsGBT.Reader(fpath)
    Data = Reader.read(0,0)
    Blocks.append(Data)

for Data in Blocks:
    # Preprocess.
    rotate_pol.rotate(Data, (-5, -7, -8, -6))
    cal_scale.scale_by_cal(Data, True, False, False, False, True)
    flag_data.flag_data(Data, 5, 0.1, 40)
    rebin_freq.rebin(Data, 8, True, True)
    rebin_time.rebin(Data, 4)
    #rotate_pol.rotate(Data, (1, 2, 3, 4))

def model(n_time, centre, width, amp_xx, amp_yy, amp_xy, amp_yx,
          off_xx, off_yy, off_yx, off_xy, slope_xx, slope_yy, slope_xy,
          slope_yx, gain_xx, gain_yy, quad_xx, quad_yy):
    
    # Preliminaries.
    time = np.arange(n_time, dtype=float) - centre
    out = np.empty((4, 2, n_time), dtype=float)
    # Generate a unit gaussian.
    gauss = np.exp(- time**2 / (2 * width**2))
    # Generate the four time series.
    out[0,:,:] = amp_xx * gauss + off_xx + slope_xx * time
    out[3,:,:] = amp_yy * gauss + off_yy + slope_yy * time
Example no. 6

        inds_total += inds_sif

inds_total.sort()
# Seems to be necessary for fitsdata[inds] to be the right type.
inds = sp.array(inds_total)

testhdulist[1].data = fitsdata[inds]
testhdulist.writeto(test_file_name)


#### A series of test data files created from guppi data.
guppi_file_name = os.getenv("GBT_DATA") + "/GBT10B_036/42_wigglez15hrst_ralongmap_230-237.fits"
Reader = fitsGBT.Reader(guppi_file_name)
Blocks = Reader.read((0, 1), None)
for Data in Blocks:
    rebin_freq.rebin(Data, 32, True, True)
    rebin_time.rebin(Data, 2)

split_Blocks = ()
for Data in Blocks:
    split_Blocks += split_bands.split(Data, 2, 32, 25)

comb_Blocks = copy.deepcopy(split_Blocks)
for Data in comb_Blocks:
    combine_cal.combine(Data, sub_mean=False)

rot_Blocks = copy.deepcopy(comb_Blocks)
for Data in rot_Blocks:
    rotate_pol.rotate(Data)

# Measure some parameters from the noise.
Example no. 7

 def process_file(self, middle) :
     """Split off to fix pyfits memory leak."""
     params = self.params
     # Construct the file name and read in all scans.
     file_name = params["input_root"] + middle + ".fits"
     Reader = fitsGBT.Reader(file_name)
     Blocks = Reader.read((), (), force_tuple=True)
     # Plotting limits need to be adjusted for on-off scans.
     if file_name.find("onoff") != -1 :
         onoff=True
     else :
         onoff=False
     # Initialize a few variables.
     counts = 0
     cal_sum_unscaled = 0
     cal_sum = 0
     cal_time = ma.zeros((0, 4))
     sys_time = ma.zeros((0, 4))
     cal_noise_spec = 0
     # Get the number of time samples in the first block and shorten it to a
     # number that should be smaller than in all the blocks.
     nt = int(Blocks[0].dims[0]*.9)
     # Get the frequency axis.  Must be done before the loop because the data
     # is rebinned in the loop.
     Blocks[0].calc_freq()
     f = Blocks[0].freq
     for Data in Blocks :
         # Rotate to XX, YY etc.
         rotate_pol.rotate(Data, (-5, -7, -8, -6))
         this_count = ma.count(Data.data[:,:,0,:] 
                               + Data.data[:,:,1,:], 0)
         cal_sum_unscaled += ma.sum(Data.data[:,:,0,:] +
                 Data.data[:,:,1,:], 0)
         # Time series of the cal temperature.
         cal_time = sp.concatenate((cal_time, ma.mean(Data.data[:,:,0,:]
             - Data.data[:,:,1,:], -1).filled(-1)), 0)
         # Everything else done in cal units.
         cal_scale.scale_by_cal(Data)
         # Time series of the system temperature.
         sys_time = sp.concatenate((sys_time, ma.mean(Data.data[:,:,0,:]
             + Data.data[:,:,1,:], -1).filled(-5)), 0)
         # Accumulate various sums.
         counts += this_count
         cal_sum += ma.sum(Data.data[:,:,0,:] + Data.data[:,:,1,:], 0)
         # Take power spectrum of on-off/on+off.
         rebin_freq.rebin(Data, 512, mean=True, by_nbins=True)
         cal_diff = ((Data.data[:,[0,-1],0,:] 
                      - Data.data[:,[0,-1],1,:])
                     / (Data.data[:,[0,-1],0,:] 
                        + Data.data[:,[0,-1],1,:]))
         cal_diff -= ma.mean(cal_diff, 0)
         cal_diff = cal_diff.filled(0)[0:nt,...]
         power = abs(fft.fft(cal_diff, axis=0)[range(nt//2+1)])
         power = power**2/nt
         cal_noise_spec += power
     # Normalize.
     cal_sum_unscaled /= 2*counts
     cal_sum /= 2*counts
     # Get the time step and frequency width for noise power normalization.
     Data = Blocks[0]
     Data.calc_time()
     dt = abs(sp.mean(sp.diff(Data.time)))
     # Note that Data was rebinned in the loop.
     dnu = abs(Data.field["CDELT1"])
     cal_noise_spec *= dt*dnu/len(Blocks)
     # Power spectrum independent axis.
     ps_freqs = sp.arange(nt//2 + 1, dtype=float)
     ps_freqs /= (nt//2 + 1)*dt*2
     # Long time axis.
     t_total = sp.arange(cal_time.shape[0])*dt
     # Make plots.
     h = plt.figure(figsize=(10,10))
     # Unscaled temperature spectrum.
     plt.subplot(3, 2, 1)
     plt.plot(f/1e6, sp.rollaxis(cal_sum_unscaled, -1))
     plt.xlim((7e2, 9e2))
     plt.xlabel("frequency (MHz)")
     plt.title("System temperature - mean over time")
     # Temperature spectrum in terms of the noise cal, 4 polarizations.
     plt.subplot(3, 2, 2)
     plt.plot(f/1e6, sp.rollaxis(cal_sum, -1))
     if onoff :
         plt.ylim((-1, 60))
     else :
         plt.ylim((-10, 40))
     plt.xlim((7e2, 9e2))
     plt.xlabel("frequency (MHz)")
     plt.title("System temperature in cal units")
     # Time series of cal T.
     plt.subplot(3, 2, 3)
     plt.plot(t_total, cal_time)
     if onoff :
         plt.xlim((0,dt*900))
     else :
         plt.xlim((0,dt*3500))
     plt.xlabel("time (s)")
     plt.title("Noise cal temperature - mean over frequency")
     # Time series of system T.
     plt.subplot(3, 2, 4)
     plt.plot(t_total, sys_time)
     plt.xlabel("time (s)")
     if onoff :
         plt.ylim((-4, 90))
         plt.xlim((0,dt*900))
     else :
         plt.ylim((-4, 35))
         plt.xlim((0,dt*3500))
     plt.title("System temperature in cal units")
     # XX cal PS.
     plt.subplot(3, 2, 5)
     plt.loglog(ps_freqs, cal_noise_spec[:,0,:])
     plt.xlim((1.0/60, 1/(2*dt)))
     plt.ylim((1e-1, 1e3))
     plt.xlabel("frequency (Hz)")
     plt.title("XX cal power spectrum")
     # YY cal PS.
     plt.subplot(3, 2, 6)
     plt.loglog(ps_freqs, cal_noise_spec[:,1,:])
     plt.xlim((1.0/60, 1/(2*dt)))
     plt.ylim((1e-1, 1e3))
     plt.xlabel("frequency (Hz)")
     plt.title("YY cal power spectrum")
     # Adjust spacing.
     plt.subplots_adjust(hspace=.4)
     # Save the figure.
     plt.savefig(params['output_root'] + middle
             + params['output_end'])
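
For reference, the power-spectrum step buried in the loop above is a one-sided periodogram of (on - off)/(on + off), normalized by the time step and frequency width. Below is a self-contained NumPy restatement of just that step, using the same nt, dt, and dnu conventions as the code above; it is a sketch for clarity, not a replacement for the pipeline code.

import numpy as np

def cal_power_spectrum(cal_diff, dt, dnu):
    """One-sided periodogram of the mean-subtracted cal fraction,
    normalized by dt * dnu, mirroring the step inside process_file."""
    nt = cal_diff.shape[0]
    power = np.abs(np.fft.fft(cal_diff, axis=0)[:nt // 2 + 1]) ** 2 / nt
    power *= dt * dnu
    # Frequency axis, matching ps_freqs in the plotting code above.
    freqs = np.arange(nt // 2 + 1, dtype=float) / ((nt // 2 + 1) * dt * 2)
    return freqs, power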
Example no. 8

 def test_subtracts_baseline(self) :
     rebin_freq.rebin(self.Data, 1.0)
     combine_cal.combine(self.Data, sub_mean=True, average_cals=False)
     data = self.Data.data
     self.assertTrue(ma.allclose(ma.mean(data, 0), 0.))
Example no. 9
# Read and preprocess the Data.
cal_Blocks = []
for fname in cal_files:
    # Read.
    fpath = data_root + fname + end
    Reader = fitsGBT.Reader(fpath)
    Data = Reader.read(0, 0)
    cal_Blocks.append(Data)

for Data in cal_Blocks:
    # Preprocess.
    rotate_pol.rotate(Data, (-5, -7, -8, -6))
    cal_scale.scale_by_cal(Data, True, False, False, False, True)
    flag_data.flag_data(Data, 5, 0.1, 40)
    #rebin_freq.rebin(Data, 16, True, True)
    rebin_freq.rebin(Data, 16, True, True)
    #combine_cal.combine(Data, (0.5, 0.5), False, True)
    combine_cal.combine(Data, (0., 1.), False, True)
    #rebin_time.rebin(Data, 4)

Data.calc_freq()

# Put all the data into the same format for the fits.
BeamData = beam_fit.FormattedData(cal_Blocks)

# Source object.  This just calculates the ephemeris of the source compared to
# where the telescope is pointing.
S = cal.source.Source(source)

# Do a preliminary fit to just the XX and YY polarizations.  This is a
# non-linear fit to the Gaussian and gets things like the centroid and the
Example no. 10
Blocks = []

for fname in fnames:
    # Read.
    fpath = root + fname
    Reader = fitsGBT.Reader(fpath)
    Data = Reader.read(0, 0)
    Blocks.append(Data)

for Data in Blocks:
    # Preprocess.
    rotate_pol.rotate(Data, (-5, -7, -8, -6))
    cal_scale.scale_by_cal(Data, True, False, False, False, True)
    flag_data.flag_data(Data, 5, 0.1, 40)
    rebin_freq.rebin(Data, 8, True, True)
    rebin_time.rebin(Data, 4)
    #rotate_pol.rotate(Data, (1, 2, 3, 4))


def model(n_time, centre, width, amp_xx, amp_yy, amp_xy, amp_yx, off_xx,
          off_yy, off_yx, off_xy, slope_xx, slope_yy, slope_xy, slope_yx,
          gain_xx, gain_yy, quad_xx, quad_yy):

    # Preliminaries.
    time = np.arange(n_time, dtype=float) - centre
    out = np.empty((4, 2, n_time), dtype=float)
    # Generate a unit gaussian.
    gauss = np.exp(-time**2 / (2 * width**2))
    # Generate the four time series.
    out[0, :, :] = amp_xx * gauss + off_xx + slope_xx * time
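
The model above treats each polarization as a Gaussian transit of the source on top of a linear baseline. As a purely illustrative sketch, and not part of this pipeline, a single-polarization version of such a model could be fit with scipy.optimize.curve_fit; the parameter set and the synthetic data below are my own simplification.

import numpy as np
from scipy.optimize import curve_fit

def single_pol_model(t, centre, width, amp, off, slope):
    # Gaussian transit plus a linear baseline, one polarization only.
    x = t - centre
    return amp * np.exp(-x**2 / (2 * width**2)) + off + slope * x

# Synthetic time series standing in for one polarization of a scan.
t = np.arange(200, dtype=float)
truth = (100.0, 15.0, 5.0, 2.0, 0.01)
y = single_pol_model(t, *truth) + np.random.default_rng(1).normal(0, 0.1, t.size)

# Non-linear least-squares fit; p0 is a rough initial guess.
popt, pcov = curve_fit(single_pol_model, t, y, p0=(90.0, 10.0, 1.0, 0.0, 0.0))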
Example no. 11

inds_total.sort()
# Seems to be necessary for fitsdata[inds] to be the right type.
inds = sp.array(inds_total)

testhdulist[1].data = fitsdata[inds]
testhdulist.writeto(test_file_name)


#### A series of test data files created from guppi data.
guppi_file_name = (os.getenv('GBT_DATA') +
                   '/GBT10B_036/42_wigglez15hrst_ralongmap_230-237.fits')
Reader = fitsGBT.Reader(guppi_file_name)
Blocks = Reader.read((0,1), None)
for Data in Blocks:
    rebin_freq.rebin(Data, 32, True, True)
    rebin_time.rebin(Data, 2)

split_Blocks = ()
for Data in Blocks:
    split_Blocks += split_bands.split(Data, 2, 32, 25)

comb_Blocks = copy.deepcopy(split_Blocks)
for Data in comb_Blocks:
    combine_cal.combine(Data, sub_mean=False)

rot_Blocks = copy.deepcopy(comb_Blocks)
for Data in rot_Blocks:
    rotate_pol.rotate(Data)

# Measure some parameters from the noise.
Example no. 12
# Read and preprocess the Data.
cal_Blocks = []
for fname in cal_files:
    # Read.
    fpath = data_root + fname + end
    Reader = fitsGBT.Reader(fpath)
    Data = Reader.read(0,0)
    cal_Blocks.append(Data)

for Data in cal_Blocks:
    # Preprocess.
    rotate_pol.rotate(Data, (-5, -7, -8, -6))
    cal_scale.scale_by_cal(Data, True, False, False, False, True)
    flag_data.flag_data(Data, 5, 0.1, 40)
    #rebin_freq.rebin(Data, 16, True, True)
    rebin_freq.rebin(Data, 16, True, True)
    #combine_cal.combine(Data, (0.5, 0.5), False, True)
    combine_cal.combine(Data, (0., 1.), False, True)
    #rebin_time.rebin(Data, 4)

Data.calc_freq()

# Put all the data into the same format for the fits.
BeamData = beam_fit.FormattedData(cal_Blocks)

# Source object.  This just calculates the ephemeris of the source compared to
# where the telescope is pointing.
S = cal.source.Source(source)

# Do a preliminary fit to just the XX and YY polarizations.  This is a
# non-linear fit to the Gaussian and gets things like the centroid and the
Example no. 13
 def test_subtracts_baseline(self):
     rebin_freq.rebin(self.Data, 1.0)
     combine_cal.combine(self.Data, sub_mean=True, average_cals=False)
     data = self.Data.data
     self.assertTrue(ma.allclose(ma.mean(data, 0), 0.))