def fs_from_file(filename, frq, src,
                 del_t=900, transposed=True, subtract_avg=False):

    f = h5py.File(filename, 'r')

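    # Add roughly half an integration time to each timestamp (hack; compare
    # the 10.50 s offset used in solve_ps_transit below).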
    times = f['index_map']['time'].value['ctime'] + 10.6

    src_trans = eph.transit_times(src, times[0])

    # try to account for differential arrival time from cylinder rotation. 

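    # The Pathfinder cylinders are rotated ~1.988 deg from true north-south,
    # so a source at declination dec crosses the focal line offset from the
    # meridian by roughly (dec - latitude) * sin(rotation) radians; the
    # second line converts that angle to seconds of time.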
    del_phi = (src._dec - np.radians(eph.CHIMELATITUDE)) * np.sin(np.radians(1.988))
    del_phi *= (24 * 3600.0) / (2 * np.pi)

    # Adjust the transit time accordingly                                                                                   
    src_trans += del_phi

    # Select +- del_t of transit, accounting for the mispointing      
    t_range = np.where((times < src_trans + del_t) & (times > src_trans - del_t))[0]

    times = times[t_range[0]:t_range[-1]]  # [offp::2] test

    print "Time range:", times[0], times[-1]

    print "\n...... This data is from %s starting at RA: %f ...... \n" \
        % (eph.unix_to_datetime(times[0]), eph.transit_RA(times[0]))


    if transposed:
        v = f['vis'][frq[0]:frq[-1]+1, :]
        v = v[..., t_range[0]:t_range[-1]]
        vis = v['r'] + 1j * v['i']

        del v

    # Read in time and freq slice if data has not yet been transposed
    else:
        v = f['vis'][t_range[0]:t_range[-1], frq[0]:frq[-1]+1, :]
        vis = v['r'][:] + 1j * v['i'][:]
        del v
        vis = np.transpose(vis, (1, 2, 0))

    inp = gen_inp()[0]

    # Remove offset from galaxy                                                                                
    if subtract_avg is True:
        vis -= 0.5 * (vis[..., 0] + vis[..., -1])[..., np.newaxis]

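    # Map channel index to sky frequency: the band spans 800 MHz down to
    # 400 MHz across 1024 channels.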
    freq_MHZ = 800.0 - np.array(frq) / 1024.0 * 400.
    print "Number of correlator inputs:", len(inp)

    baddies = np.where(np.isnan(tools.get_feed_positions(inp)[:, 0]))[0]

    # Fringestop to location of "src"

    data_fs = tools.fringestop_pathfinder(vis, eph.transit_RA(times), freq_MHZ, inp, src)
#    data_fs = fringestop_pathfinder(vis, eph.transit_RA(times), freq_MHZ, inp, src)


    return data_fs
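
A minimal usage sketch (the filename and channel range are placeholders; eph is ch_util.ephemeris, as in the function body):

    # Fringestop ten frequency channels of an acquisition to Tau A.
    frq = range(305, 315)
    data_fs = fs_from_file('20150903T000000Z.h5', frq, eph.TauA,
                           del_t=900, transposed=True)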
Example #2
    def read_bl_from_file(self, filename, ant_0, ant_1):
        feeds = []
        inputs = zip(*self.read_data.index_map['input'])[1]
        for i in pickle.load(open(filename)):
            if i.input_sn in (inputs[ant_0], inputs[ant_1]):
                feeds.append(i)

        pos0, pos1 = tools.get_feed_positions(feeds)
        bl2d = pos1 - pos0
        bl = array((bl2d[0], bl2d[1], 0.0))
        return bl
Example #3
    def get_bl(self, ant_0, ant_1):
        feeds = []
        self.phys_bl = []
        inputs = zip(*self.read_data.index_map['input'])[1]
        for i in self.layout:
            if i.input_sn in (inputs[ant_0], inputs[ant_1]):
                feeds.append(i)
                if ant_0 == ant_1:
                    feeds.append(i)
        pos0, pos1 = tools.get_feed_positions(feeds)
        bl2d = pos1 - pos0
        bl = array((bl2d[0], bl2d[1], 0.0))
        return bl

    def feedpositions(self):
        """The set of feed positions on *all* cylinders.

        This is constructed for the given layout and includes all rotations of
        the cylinder axis.

        Returns
        -------
        feedpositions : np.ndarray
            The positions in the telescope plane of the receivers. Packed as
            [[u1, v1], [u2, v2], ...].
        """

        if self._pos is None:
            # Fetch cylinder relative positions
            pos = tools.get_feed_positions(self.feeds)

            # The above routine returns NaNs for non CHIME feeds. This is a bit
            # messy, so turn them into zeros.
            self._pos = np.nan_to_num(pos)

        return self._pos
def solve_ps_transit(filename, corrs, feeds, inp, src,
                     nfreq=1024, transposed=False, nfeed=128):
    """ Function that fringestops time slice 
    where point source is in the beam, takes 
    all correlations for a given polarization, and then 
    eigendecomposes the correlation matrix freq by freq
    after removing the fpga phases. It will also 
    plot intermediate steps to verify the phase solution.

    Parameters
    ----------
    filename : np.str
         Full-path filename 
    corrs : list
         List of correlations to use in solver
    feeds : list
         List of feeds to use
    inp   : 
         Correlator inputs (output of ch_util.tools.get_correlator_inputs)
    src   : ephem.FixedBody
         Source to calibrate off of. e.g. ch_util.ephemeris.TauA
    
    Returns
    -------
    Gains : np.array
         Complex gain array (nfreq, nfeed) 
    """

    nsplit = 32 # Number of freq chunks to divide nfreq into
    del_t = 800

    f = h5py.File(filename, 'r')

    # Add half an integration time to each. Hack. 
    times = f['index_map']['time'].value['ctime'] + 10.50
    src_trans = eph.transit_times(src, times[0])
    
    # try to account for differential arrival time from 
    # cylinder rotation. 
    del_phi = (src._dec - np.radians(eph.CHIMELATITUDE)) \
                 * np.sin(np.radians(1.988))
    del_phi *= (24 * 3600.0) / (2 * np.pi)

    # Adjust the transit time accordingly
    src_trans += del_phi

    # Select +- del_t of transit, accounting for the mispointing 
    t_range = np.where((times < src_trans + del_t)
                       & (times > src_trans - del_t))[0]

    print "\n...... This data is from %s starting at RA: %f ...... \n" \
        % (eph.unix_to_datetime(times[0]), eph.transit_RA(times[0]))

    assert (len(t_range) > 0), "Source is not in this acq"

    # Create gains array to fill in solution
    Gains = np.zeros([nfreq, nfeed], np.complex128)
    
    print "Starting the solver"
    
    times = times[t_range[0]:t_range[-1]]
    
    k=0
    
    # Start at a strong freq channel that can be plotted
    # and from which we can find the noise source on-sample
    for i in range(12, nsplit) + range(0, 12):

        k+=1

        # Divides the arrays up into nfreq / nsplit freq chunks and solves those
        frq = range(i * nfreq // nsplit, (i+1) * nfreq // nsplit)
        
        print "      %d:%d \n" % (frq[0], frq[-1])

        # Read in time and freq slice if data has already been transposed
        if transposed is True:
            v = f['vis'][frq[0]:frq[-1]+1, corrs, :]
            v = v[..., t_range[0]:t_range[-1]]
            vis = v['r'] + 1j * v['i']

            if k==1:
                autos = auto_corrs(nfeed)
                offp = (abs(vis[:, autos, 0::2]).mean() > \
                        (abs(vis[:, autos, 1::2]).mean())).astype(int)

                times = times[offp::2]
            
            vis = vis[..., offp::2]

            gg = f['gain_coeff'][frq[0]:frq[-1]+1, 
                    feeds, t_range[0]:t_range[-1]][..., offp::2]

            gain_coeff = gg['r'] + 1j * gg['i']
            
            del gg

        # Read in time and freq slice if data has not yet been transposed
        if transposed is False:
            print "TRANSPOSED V OF CODE DOESN'T WORK YET!"
            v = f['vis'][t_range[0]:t_range[-1]:2, frq[0]:frq[-1]+1, corrs]
            vis = v['r'][:] + 1j * v['i'][:]
            del v

            gg = f['gain_coeff'][0, frq[0]:frq[-1]+1, feeds]
            gain_coeff = gg['r'][:] + 1j * gg['i'][:]

            vis = vis[..., offp::2]

            vis = np.transpose(vis, (1, 2, 0))


        # Remove fpga gains from data
        vis = remove_fpga_gains(vis, gain_coeff, nfeed=nfeed, triu=False)

        # Remove offset from galaxy
        vis -= 0.5 * (vis[..., 0] + vis[..., -1])[..., np.newaxis]
   
        # Get physical freq for fringestopper
        freq_MHZ = 800.0 - np.array(frq) / 1024.0 * 400.
    
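        # Feeds with unknown (NaN) positions; their correlations are zeroed
        # below so they do not corrupt the fringestopped data.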
        baddies = np.where(np.isnan(tools.get_feed_positions(inp)[:, 0]))[0]
        a, b, c = select_corrs(baddies, nfeed=128)

        vis[:, a + b] = 0.0

        # Fringestop to location of "src"
        data_fs = tools.fringestop_pathfinder(vis, eph.transit_RA(times), freq_MHZ, inp, src)

        del vis

        dr, sol_arr = solve_gain(data_fs)

        # Find index of point source transit
        drlist = np.argmax(dr, axis=-1)
        
        # If multiple freq channels are zerod, the trans_pix
        # will end up being 0. This is bad, so ensure that 
        # you are only looking for non-zero transit pixels.
        drlist = [x for x in drlist if x != 0]
        trans_pix = np.argmax(np.bincount(drlist))

        assert trans_pix != 0.0

        Gains[frq] = sol_arr[..., trans_pix-3:trans_pix+4].mean(-1)

        zz = h5py.File('data' + str(i) + '.hdf5','w')
        zz.create_dataset('data', data=dr)
        zz.close()

        print "%f, %d Nans out of %d" % (np.isnan(sol_arr).sum(), np.isnan(Gains[frq]).sum(), np.isnan(Gains[frq]).sum())
        print trans_pix, sol_arr[..., trans_pix-3:trans_pix+4].mean(-1).sum(), sol_arr.mean(-1).sum()

        # Plot up post-fs phases to see if everything has been fixed
        if frq[0] == 12 * nsplit:
            print "======================"
            print "   Plotting up freq: %d" % frq[0]
            print "======================"
            img_nm = './phs_plots/dfs' + str(frq[17]) + str(int(time.time())) + '.png'
            img_nmcorr = './phs_plots/dfs' + str(frq[17]) + str(int(time.time())) + 'corr.png'

            plt_gains(data_fs, 0, img_name=img_nm, bad_chans=baddies)
            dfs_corr = correct_dfs(data_fs, np.angle(Gains[frq])[..., np.newaxis], nfeed=128)

            plt_gains(dfs_corr, 0, img_name=img_nmcorr, bad_chans=baddies)

            del dfs_corr

        del data_fs, a

    return Gains
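
For reference, a minimal sketch of the per-frequency eigendecomposition that solve_gain performs on the fringestopped data (solve_gain itself is defined elsewhere in this module; the names and normalization here are assumptions, not the actual implementation):

    import numpy as np

    def solve_gain_sketch(C):
        # C : (nfreq, nfeed, nfeed) stack of complex Hermitian
        # correlation matrices. Returns a dynamic-range proxy (ratio of
        # the two largest eigenvalues) and a rank-1 gain estimate g
        # such that C ~ g g^H.
        nfreq, nfeed = C.shape[0], C.shape[1]
        dr = np.zeros(nfreq)
        sol_arr = np.zeros((nfreq, nfeed), dtype=np.complex128)
        for nu in range(nfreq):
            evals, evecs = np.linalg.eigh(C[nu])  # eigenvalues ascending
            dr[nu] = evals[-1] / abs(evals[-2])
            sol_arr[nu] = evecs[:, -1] * np.sqrt(evals[-1])
        return dr, sol_arr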
Example #6
def offline_point_source_calibration(file_list,
                                     source,
                                     inputmap=None,
                                     start=None,
                                     stop=None,
                                     physical_freq=None,
                                     tcorr=None,
                                     logging_params=DEFAULT_LOGGING,
                                     **kwargs):
    # Load config
    config = DEFAULTS.deepcopy()
    config.merge(NameSpace(kwargs))

    # Setup logging
    log.setup_logging(logging_params)
    mlog = log.get_logger(__name__)

    mlog.info("ephemeris file: %s" % ephemeris.__file__)

    # Set the model to use
    fitter_function = utils.fit_point_source_transit
    model_function = utils.model_point_source_transit

    farg = inspect.getargspec(fitter_function)
    defaults = {
        key: val
        for key, val in zip(farg.args[-len(farg.defaults):], farg.defaults)
    }
    poly_deg_amp = kwargs.get('poly_deg_amp', defaults['poly_deg_amp'])
    poly_deg_phi = kwargs.get('poly_deg_phi', defaults['poly_deg_phi'])
    poly_type = kwargs.get('poly_type', defaults['poly_type'])

    param_name = ([
        '%s_poly_amp_coeff%d' % (poly_type, cc)
        for cc in range(poly_deg_amp + 1)
    ] + [
        '%s_poly_phi_coeff%d' % (poly_type, cc)
        for cc in range(poly_deg_phi + 1)
    ])

    model_kwargs = [('poly_deg_amp', poly_deg_amp),
                    ('poly_deg_phi', poly_deg_phi), ('poly_type', poly_type)]
    model_name = '.'.join(
        [getattr(model_function, key) for key in ['__module__', '__name__']])

    tval = {}

    # Set where to evaluate gain
    ha_eval_str = ['raw_transit']

    if config.multi_sample:
        ha_eval_str += ['transit', 'peak']
        ha_eval = [0.0, None]
        fitslc = slice(1, 3)

    ind_eval = ha_eval_str.index(config.evaluate_gain_at)

    # Determine dimensions
    direction = ['amp', 'phi']
    nparam = len(param_name)
    ngain = len(ha_eval_str)
    ndir = len(direction)

    # Determine frequencies
    data = andata.CorrData.from_acq_h5(file_list,
                                       datasets=(),
                                       start=start,
                                       stop=stop)
    freq = data.freq

    if physical_freq is not None:
        index_freq = np.array(
            [np.argmin(np.abs(ff - freq)) for ff in physical_freq])
        freq_sel = utils.convert_to_slice(index_freq)
        freq = freq[index_freq]
    else:
        index_freq = np.arange(freq.size)
        freq_sel = None

    nfreq = freq.size

    # Compute flux of source
    inv_rt_flux_density = tools.invert_no_zero(
        np.sqrt(FluxCatalog[source].predict_flux(freq)))

    # Read in the eigenvalues for all frequencies
    data = andata.CorrData.from_acq_h5(file_list,
                                       datasets=['erms', 'eval'],
                                       freq_sel=freq_sel,
                                       start=start,
                                       stop=stop)

    # Determine source coordinates
    this_csd = np.floor(ephemeris.unix_to_csd(np.median(data.time)))
    timestamp0 = ephemeris.transit_times(FluxCatalog[source].skyfield,
                                         ephemeris.csd_to_unix(this_csd))[0]
    src_ra, src_dec = ephemeris.object_coords(FluxCatalog[source].skyfield,
                                              date=timestamp0,
                                              deg=True)

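    # Hour angle of the source at each time sample, wrapped into
    # [-180, 180) degrees before conversion to radians.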
    ra = ephemeris.lsa(data.time)
    ha = ra - src_ra
    ha = ha - (ha > 180.0) * 360.0 + (ha < -180.0) * 360.0
    ha = np.radians(ha)

    itrans = np.argmin(np.abs(ha))

    window = 0.75 * np.max(np.abs(ha))

    off_source = np.abs(ha) > window

    mlog.info("CSD %d" % this_csd)
    mlog.info("Hour angle at transit (%d of %d):  %0.2f deg   " %
              (itrans, len(ha), np.degrees(ha[itrans])))
    mlog.info("Hour angle off source: %0.2f deg" %
              np.median(np.abs(np.degrees(ha[off_source]))))

    src_dec = np.radians(src_dec)
    lat = np.radians(ephemeris.CHIMELATITUDE)

    # Determine division of frequencies
    ninput = data.ninput
    ntime = data.ntime
    nblock_freq = int(np.ceil(nfreq / float(config.nfreq_per_block)))

    # Determine bad inputs
    eps = 10.0 * np.finfo(data['erms'].dtype).eps
    good_freq = np.flatnonzero(np.all(data['erms'][:] > eps, axis=-1))
    ind_sub_freq = good_freq[slice(0, good_freq.size,
                                   max(int(good_freq.size / 10), 1))]

    tmp_data = andata.CorrData.from_acq_h5(file_list,
                                           datasets=['evec'],
                                           freq_sel=ind_sub_freq,
                                           start=start,
                                           stop=stop)
    eps = 10.0 * np.finfo(tmp_data['evec'].dtype).eps
    bad_input = np.flatnonzero(
        np.all(np.abs(tmp_data['evec'][:, 0]) < eps, axis=(0, 2)))

    input_axis = tmp_data.input.copy()

    del tmp_data

    # Query layout database for correlator inputs
    if inputmap is None:
        inputmap = tools.get_correlator_inputs(
            datetime.datetime.utcfromtimestamp(data.time[itrans]),
            correlator='chime')

    inputmap = tools.reorder_correlator_inputs(input_axis, inputmap)

    tools.change_chime_location(rotation=config.telescope_rotation)

    # Determine x and y pol index
    xfeeds = np.array([
        idf for idf, inp in enumerate(inputmap)
        if (idf not in bad_input) and tools.is_array_x(inp)
    ])
    yfeeds = np.array([
        idf for idf, inp in enumerate(inputmap)
        if (idf not in bad_input) and tools.is_array_y(inp)
    ])

    nfeed = xfeeds.size + yfeeds.size

    pol = [yfeeds, xfeeds]
    polstr = ['Y', 'X']
    npol = len(pol)

    neigen = min(max(npol, config.neigen), data['eval'].shape[1])

    phase_ref = config.phase_reference_index
    phase_ref_by_pol = [
        pol[pp].tolist().index(phase_ref[pp]) for pp in range(npol)
    ]

    # Calculate dynamic range
    eval0_off_source = np.median(data['eval'][:, 0, off_source], axis=-1)

    dyn = data['eval'][:, 1, :] * tools.invert_no_zero(
        eval0_off_source[:, np.newaxis])

    # Determine frequencies to mask
    not_rfi = np.ones((nfreq, 1), dtype=np.bool)
    if config.mask_rfi is not None:
        for frng in config.mask_rfi:
            not_rfi[:, 0] &= ((freq < frng[0]) | (freq > frng[1]))

    mlog.info("%0.1f percent of frequencies available after masking RFI." %
              (100.0 * np.sum(not_rfi, dtype=np.float32) / float(nfreq), ))

    #dyn_flg = utils.contiguous_flag(dyn > config.dyn_rng_threshold, centre=itrans)
    if source in config.dyn_rng_threshold:
        dyn_rng_threshold = config.dyn_rng_threshold[source]
    else:
        dyn_rng_threshold = config.dyn_rng_threshold.default

    mlog.info("Dynamic range threshold set to %0.1f." % dyn_rng_threshold)

    dyn_flg = dyn > dyn_rng_threshold

    # Calculate fit flag
    fit_flag = np.zeros((nfreq, npol, ntime), dtype=np.bool)
    for pp in range(npol):

        mlog.info("Dynamic Range Nsample, Pol %d:  %s" % (pp, ','.join([
            "%d" % xx for xx in np.percentile(np.sum(dyn_flg, axis=-1),
                                              [25, 50, 75, 100])
        ])))

        if config.nsigma1 is None:
            fit_flag[:, pp, :] = dyn_flg & not_rfi

        else:

            fit_window = config.nsigma1 * np.radians(
                utils.get_window(freq, pol=polstr[pp], dec=src_dec, deg=True))

            win_flg = np.abs(ha)[np.newaxis, :] <= fit_window[:, np.newaxis]

            fit_flag[:, pp, :] = (dyn_flg & win_flg & not_rfi)

    # Calculate base error
    base_err = data['erms'][:, np.newaxis, :]

    # Check for sign flips
    ref_resp = andata.CorrData.from_acq_h5(file_list,
                                           datasets=['evec'],
                                           input_sel=config.eigen_reference,
                                           freq_sel=freq_sel,
                                           start=start,
                                           stop=stop)['evec'][:, 0:neigen,
                                                              0, :]

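    # sign0 is +1 where the real part of the reference response is positive
    # and -1 where it is negative; multiplying by it removes pi phase flips.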
    sign0 = 1.0 - 2.0 * (ref_resp.real < 0.0)

    # Check that we have the correct reference feed
    if np.any(np.abs(ref_resp.imag) > 0.0):
        raise ValueError("Reference feed %d is incorrect." % config.eigen_reference)

    del ref_resp

    # Save index_map
    results = {}
    results['model'] = model_name
    results['param'] = param_name
    results['freq'] = data.index_map['freq'][:]
    results['input'] = input_axis
    results['eval'] = ha_eval_str
    results['dir'] = direction

    for key, val in model_kwargs:
        results[key] = val

    # Initialize numpy arrays to hold results
    if config.return_response:

        results['response'] = np.zeros((nfreq, ninput, ntime),
                                       dtype=np.complex64)
        results['response_err'] = np.zeros((nfreq, ninput, ntime),
                                           dtype=np.float32)
        results['fit_flag'] = fit_flag
        results['ha_axis'] = ha
        results['ra'] = ra

    else:

        results['gain_eval'] = np.zeros((nfreq, ninput, ngain),
                                        dtype=np.complex64)
        results['weight_eval'] = np.zeros((nfreq, ninput, ngain),
                                          dtype=np.float32)
        results['frac_gain_err'] = np.zeros((nfreq, ninput, ngain, ndir),
                                            dtype=np.float32)

        results['parameter'] = np.zeros((nfreq, ninput, nparam),
                                        dtype=np.float32)
        results['parameter_err'] = np.zeros((nfreq, ninput, nparam),
                                            dtype=np.float32)

        results['index_eval'] = np.full((nfreq, ninput), -1, dtype=np.int8)
        results['gain'] = np.zeros((nfreq, ninput), dtype=np.complex64)
        results['weight'] = np.zeros((nfreq, ninput), dtype=np.float32)

        results['ndof'] = np.zeros((nfreq, ninput, ndir), dtype=np.float32)
        results['chisq'] = np.zeros((nfreq, ninput, ndir), dtype=np.float32)

        results['timing'] = np.zeros((nfreq, ninput), dtype=np.complex64)

    # Initialize metric like variables
    results['runtime'] = np.zeros((nblock_freq, 2), dtype=np.float64)

    # Compute distances
    dist = tools.get_feed_positions(inputmap)
    for pp, feeds in enumerate(pol):
        dist[feeds, :] -= dist[phase_ref[pp], np.newaxis, :]

    # Loop over frequency blocks
    for gg in range(nblock_freq):

        mlog.info("Frequency block %d of %d." % (gg, nblock_freq))

        fstart = gg * config.nfreq_per_block
        fstop = min((gg + 1) * config.nfreq_per_block, nfreq)
        findex = np.arange(fstart, fstop)
        ngroup = findex.size

        freq_sel = utils.convert_to_slice(index_freq[findex])

        timeit_start_gg = time.time()

        #
        if config.return_response:
            gstart = start
            gstop = stop

            tslc = slice(0, ntime)

        else:
            good_times = np.flatnonzero(np.any(fit_flag[findex], axis=(0, 1)))

            if good_times.size == 0:
                continue

            gstart = int(np.min(good_times))
            gstop = int(np.max(good_times)) + 1

            tslc = slice(gstart, gstop)

            gstart += start
            gstop += start

        hag = ha[tslc]
        itrans = np.argmin(np.abs(hag))

        # Load eigenvectors.
        nudata = andata.CorrData.from_acq_h5(
            file_list,
            datasets=['evec', 'vis', 'flags/vis_weight'],
            apply_gain=False,
            freq_sel=freq_sel,
            start=gstart,
            stop=gstop)

        # Save time to load data
        results['runtime'][gg, 0] = time.time() - timeit_start_gg
        timeit_start_gg = time.time()

        mlog.info("Time to load (per frequency):  %0.3f sec" %
                  (results['runtime'][gg, 0] / ngroup, ))

        # Loop over polarizations
        for pp, feeds in enumerate(pol):

            # Get timing correction
            if tcorr is not None:
                tgain = tcorr.get_gain(nudata.freq, nudata.input[feeds],
                                       nudata.time)
                tgain *= tgain[:, phase_ref_by_pol[pp], np.newaxis, :].conj()

                tgain_transit = tgain[:, :, itrans].copy()
                tgain *= tgain_transit[:, :, np.newaxis].conj()

            # Create the polarization masking vector
            P = np.zeros((1, ninput, 1), dtype=np.float64)
            P[:, feeds, :] = 1.0

            # Loop over frequencies
            for gff, ff in enumerate(findex):

                flg = fit_flag[ff, pp, tslc]

                if (2 * int(np.sum(flg))) < (nparam +
                                             1) and not config.return_response:
                    continue

                # Normalize by eigenvalue and correct for pi phase flips in process.
                resp = (nudata['evec'][gff, 0:neigen, :, :] *
                        np.sqrt(data['eval'][ff, 0:neigen, np.newaxis, tslc]) *
                        sign0[ff, :, np.newaxis, tslc])

                # Rotate to single-pol response
                # Move time to first axis for the matrix multiplication
                invL = tools.invert_no_zero(
                    np.rollaxis(data['eval'][ff, 0:neigen, np.newaxis, tslc],
                                -1, 0))

                UT = np.rollaxis(resp, -1, 0)
                U = np.swapaxes(UT, -1, -2)

                mu, vp = np.linalg.eigh(np.matmul(UT.conj(), P * U))

                rsign0 = (1.0 - 2.0 * (vp[:, 0, np.newaxis, :].real < 0.0))

                resp = mu[:, np.newaxis, :] * np.matmul(U, rsign0 * vp * invL)

                # Extract feeds of this pol
                # Transpose so that time is back to last axis
                resp = resp[:, feeds, -1].T

                # Compute error on response
                dataflg = ((nudata.weight[gff, feeds, :] > 0.0)
                           & np.isfinite(nudata.weight[gff, feeds, :])).astype(
                               np.float32)

                resp_err = dataflg * base_err[ff, :, tslc] * np.sqrt(
                    nudata.vis[gff, feeds, :].real) * tools.invert_no_zero(
                        np.sqrt(mu[np.newaxis, :, -1]))

                # Reference to specific input
                resp *= np.exp(
                    -1.0J *
                    np.angle(resp[phase_ref_by_pol[pp], np.newaxis, :]))

                # Apply timing correction
                if tcorr is not None:
                    resp *= tgain[gff]

                    results['timing'][ff, feeds] = tgain_transit[gff]

                # Fringestop
                lmbda = scipy.constants.c * 1e-6 / nudata.freq[gff]

                resp *= tools.fringestop_phase(
                    hag[np.newaxis, :], lat, src_dec,
                    dist[feeds, 0, np.newaxis] / lmbda,
                    dist[feeds, 1, np.newaxis] / lmbda)

                # Normalize by source flux
                resp *= inv_rt_flux_density[ff]
                resp_err *= inv_rt_flux_density[ff]

                # If requested, reference phase to the median value
                if config.med_phase_ref:
                    phi0 = np.angle(resp[:, itrans, np.newaxis])
                    resp *= np.exp(-1.0J * phi0)
                    resp *= np.exp(
                        -1.0J *
                        np.median(np.angle(resp), axis=0, keepdims=True))
                    resp *= np.exp(1.0J * phi0)

                # Check if return_response flag was set by user
                if not config.return_response:

                    if config.multi_sample:
                        moving_window = config.nsigma2 and config.nsigma2 * np.radians(
                            utils.get_window(nudata.freq[gff],
                                             pol=polstr[pp],
                                             dec=src_dec,
                                             deg=True))

                    # Loop over inputs
                    for pii, ii in enumerate(feeds):

                        is_good = flg & (np.abs(resp[pii, :]) >
                                         0.0) & (resp_err[pii, :] > 0.0)

                        # Set the intial gains based on raw response at transit
                        if is_good[itrans]:
                            results['gain_eval'][ff, ii,
                                                 0] = tools.invert_no_zero(
                                                     resp[pii, itrans])
                            results['frac_gain_err'][ff, ii, 0, :] = (
                                resp_err[pii, itrans] * tools.invert_no_zero(
                                    np.abs(resp[pii, itrans])))
                            results['weight_eval'][ff, ii, 0] = 0.5 * (
                                np.abs(resp[pii, itrans])**2 *
                                tools.invert_no_zero(resp_err[pii, itrans]))**2

                            results['index_eval'][ff, ii] = 0
                            results['gain'][ff,
                                            ii] = results['gain_eval'][ff, ii,
                                                                       0]
                            results['weight'][ff,
                                              ii] = results['weight_eval'][ff,
                                                                           ii,
                                                                           0]

                        # Exit if not performing multi time sample fit
                        if not config.multi_sample:
                            continue

                        if (2 * int(np.sum(is_good))) < (nparam + 1):
                            continue

                        try:
                            param, param_err, gain, gain_err, ndof, chisq, tval = fitter_function(
                                hag[is_good],
                                resp[pii, is_good],
                                resp_err[pii, is_good],
                                ha_eval,
                                window=moving_window,
                                tval=tval,
                                **config.fit)
                        except Exception as rex:
                            if config.verbose:
                                mlog.info(
                                    "Frequency %0.2f, Feed %d failed with error: %s"
                                    % (nudata.freq[gff], ii, rex))
                            continue

                        # Check for nan
                        wfit = (np.abs(gain) *
                                tools.invert_no_zero(np.abs(gain_err)))**2
                        if np.any(~np.isfinite(np.abs(gain))) or np.any(
                                ~np.isfinite(wfit)):
                            continue

                        # Save to results using the convention that you should *multiply* the visibilities by the gains
                        results['gain_eval'][
                            ff, ii, fitslc] = tools.invert_no_zero(gain)
                        results['frac_gain_err'][ff, ii, fitslc,
                                                 0] = gain_err.real
                        results['frac_gain_err'][ff, ii, fitslc,
                                                 1] = gain_err.imag
                        results['weight_eval'][ff, ii, fitslc] = wfit

                        results['parameter'][ff, ii, :] = param
                        results['parameter_err'][ff, ii, :] = param_err

                        results['ndof'][ff, ii, :] = ndof
                        results['chisq'][ff, ii, :] = chisq

                        # Check if the fit was successful and update the gain evaluation index appropriately
                        if np.all((chisq / ndof.astype(np.float32)
                                   ) <= config.chisq_per_dof_threshold):
                            results['index_eval'][ff, ii] = ind_eval
                            results['gain'][ff, ii] = results['gain_eval'][
                                ff, ii, ind_eval]
                            results['weight'][ff, ii] = results['weight_eval'][
                                ff, ii, ind_eval]

                else:

                    # Return response only (do not fit model)
                    results['response'][ff, feeds, :] = resp
                    results['response_err'][ff, feeds, :] = resp_err

        # Save time to fit data
        results['runtime'][gg, 1] = time.time() - timeit_start_gg

        mlog.info("Time to fit (per frequency):  %0.3f sec" %
                  (results['runtime'][gg, 1] / ngroup, ))

        # Clean up
        del nudata
        gc.collect()

    # Print total run time
    mlog.info("TOTAL TIME TO LOAD: %0.3f min" %
              (np.sum(results['runtime'][:, 0]) / 60.0, ))
    mlog.info("TOTAL TIME TO FIT:  %0.3f min" %
              (np.sum(results['runtime'][:, 1]) / 60.0, ))

    # Set the best estimate of the gain
    if not config.return_response:

        flag = results['index_eval'] >= 0
        gain = results['gain']

        # Compute amplitude
        amp = np.abs(gain)

        # Hard cutoffs on the amplitude
        med_amp = np.median(amp[flag])
        min_amp = med_amp * config.min_amp_scale_factor
        max_amp = med_amp * config.max_amp_scale_factor

        flag &= ((amp >= min_amp) & (amp <= max_amp))

        # Flag outliers in amplitude for each frequency
        for pp, feeds in enumerate(pol):

            med_amp_by_pol = np.zeros(nfreq, dtype=np.float32)
            sig_amp_by_pol = np.zeros(nfreq, dtype=np.float32)

            for ff in range(nfreq):

                this_flag = flag[ff, feeds]

                if np.any(this_flag):

                    med, slow, shigh = utils.estimate_directional_scale(
                        amp[ff, feeds[this_flag]])
                    lower = med - config.nsigma_outlier * slow
                    upper = med + config.nsigma_outlier * shigh

                    flag[ff, feeds] &= ((amp[ff, feeds] >= lower) &
                                        (amp[ff, feeds] <= upper))

                    med_amp_by_pol[ff] = med
                    sig_amp_by_pol[ff] = 0.5 * (shigh - slow) / np.sqrt(
                        np.sum(this_flag, dtype=np.float32))

            if config.nsigma_med_outlier:

                med_flag = med_amp_by_pol > 0.0

                not_outlier = flag_outliers(med_amp_by_pol,
                                            med_flag,
                                            window=config.window_med_outlier,
                                            nsigma=config.nsigma_med_outlier)
                flag[:, feeds] &= not_outlier[:, np.newaxis]

                mlog.info("Pol %s:  %d frequencies are outliers." %
                          (polstr[pp],
                           np.sum(~not_outlier & med_flag, dtype=np.int)))

        # Determine bad frequencies
        flag_freq = (np.sum(flag, axis=1, dtype=np.float32) /
                     float(ninput)) > config.threshold_good_freq
        good_freq = np.flatnonzero(flag_freq)

        # Determine bad inputs
        fraction_good = np.sum(flag[good_freq, :], axis=0,
                               dtype=np.float32) / float(good_freq.size)
        flag_input = fraction_good > config.threshold_good_input

        # Finalize flag
        flag &= (flag_freq[:, np.newaxis] & flag_input[np.newaxis, :])

        # Interpolate gains
        interp_gain, interp_weight = interpolate_gain(
            freq,
            gain,
            results['weight'],
            flag=flag,
            length_scale=config.interpolation_length_scale,
            mlog=mlog)
        # Save gains to object
        results['flag'] = flag
        results['gain'] = interp_gain
        results['weight'] = interp_weight

    # Return results
    return results
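
A hypothetical invocation (the file list and frequency are placeholders; the source must be a key of FluxCatalog):

    results = offline_point_source_calibration(
        ['cal_acq_000.h5', 'cal_acq_001.h5'], 'CYG_A',
        start=None, stop=None, physical_freq=[600.0])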
Example #7
def main(config_file=None, logging_params=DEFAULT_LOGGING):

    # Setup logging
    log.setup_logging(logging_params)
    mlog = log.get_logger(__name__)

    # Set config
    config = DEFAULTS.deepcopy()
    if config_file is not None:
        config.merge(NameSpace(load_yaml_config(config_file)))

    # Set niceness
    current_niceness = os.nice(0)
    os.nice(config.niceness - current_niceness)
    mlog.info('Changing process niceness from %d to %d.  Confirm:  %d' %
              (current_niceness, config.niceness, os.nice(0)))

    # Find acquisition files
    acq_files = sorted(glob(os.path.join(config.data_dir, config.acq, "*.h5")))
    nfiles = len(acq_files)

    # Determine time range of each file
    findex = []
    tindex = []
    for ii, filename in enumerate(acq_files):
        subdata = andata.CorrData.from_acq_h5(filename, datasets=())

        findex += [ii] * subdata.ntime
        tindex += range(subdata.ntime)

    findex = np.array(findex)
    tindex = np.array(tindex)

    # Determine transits within these files
    transits = []

    data = andata.CorrData.from_acq_h5(acq_files, datasets=())

    solar_rise = ephemeris.solar_rising(data.time[0] - 24.0 * 3600.0,
                                        end_time=data.time[-1])

    for rr in solar_rise:

        ss = ephemeris.solar_setting(rr)[0]

        solar_flag = np.flatnonzero((data.time >= rr) & (data.time <= ss))

        if solar_flag.size > 0:

            solar_flag = solar_flag[::config.downsample]

            tval = data.time[solar_flag]

            this_findex = findex[solar_flag]
            this_tindex = tindex[solar_flag]

            file_list, tindices = [], []

            for ii in range(nfiles):

                this_file = np.flatnonzero(this_findex == ii)

                if this_file.size > 0:

                    file_list.append(acq_files[ii])
                    tindices.append(this_tindex[this_file])

            date = ephemeris.unix_to_datetime(rr).strftime('%Y%m%dT%H%M%SZ')
            transits.append((date, tval, file_list, tindices))

    # Create file prefix and suffix
    prefix = []

    prefix.append("redundant_calibration")

    if config.output_prefix is not None:
        prefix.append(config.output_prefix)

    prefix = '_'.join(prefix)

    suffix = []

    if config.include_auto:
        suffix.append("wauto")
    else:
        suffix.append("noauto")

    if config.include_intracyl:
        suffix.append("wintra")
    else:
        suffix.append("nointra")

    if config.fix_degen:
        suffix.append("fixed_degen")
    else:
        suffix.append("degen")

    suffix = '_'.join(suffix)

    # Loop over solar transits
    for date, timestamps, files, time_indices in transits:

        nfiles = len(files)

        mlog.info("%s (%d files) " % (date, nfiles))

        output_file = os.path.join(config.output_dir,
                                   "%s_SUN_%s_%s.h5" % (prefix, date, suffix))

        mlog.info("Saving to:  %s" % output_file)

        # Get info about this set of files
        data = andata.CorrData.from_acq_h5(files,
                                           datasets=['flags/inputs'],
                                           apply_gain=False,
                                           renormalize=False)

        coord = sun_coord(timestamps, deg=True)

        fstart = config.freq_start if config.freq_start is not None else 0
        fstop = config.freq_stop if config.freq_stop is not None else data.freq.size
        freq_index = range(fstart, fstop)

        freq = data.freq[freq_index]

        ntime = timestamps.size
        nfreq = freq.size

        # Determine bad inputs
        if config.bad_input_file is None or not os.path.isfile(
                config.bad_input_file):
            bad_input = np.flatnonzero(
                ~np.all(data.flags['inputs'][:], axis=-1))
        else:
            with open(config.bad_input_file, 'r') as handler:
                bad_input = pickle.load(handler)

        mlog.info("%d inputs flagged as bad." % bad_input.size)

        nant = data.ninput

        # Determine polarization product maps
        dbinputs = tools.get_correlator_inputs(ephemeris.unix_to_datetime(
            timestamps[0]),
                                               correlator='chime')

        dbinputs = tools.reorder_correlator_inputs(data.input, dbinputs)

        feedpos = tools.get_feed_positions(dbinputs)

        prod = defaultdict(list)
        dist = defaultdict(list)

        for pp, this_prod in enumerate(data.prod):

            aa, bb = this_prod
            inp_aa = dbinputs[aa]
            inp_bb = dbinputs[bb]

            if (aa in bad_input) or (bb in bad_input):
                continue

            if not tools.is_chime(inp_aa) or not tools.is_chime(inp_bb):
                continue

            if not config.include_intracyl and (inp_aa.cyl == inp_bb.cyl):
                continue

            if not config.include_auto and (aa == bb):
                continue

            this_dist = list(feedpos[aa, :] - feedpos[bb, :])

            if tools.is_array_x(inp_aa) and tools.is_array_x(inp_bb):
                key = 'XX'

            elif tools.is_array_y(inp_aa) and tools.is_array_y(inp_bb):
                key = 'YY'

            elif not config.include_crosspol:
                continue

            elif tools.is_array_x(inp_aa) and tools.is_array_y(inp_bb):
                key = 'XY'

            elif tools.is_array_y(inp_aa) and tools.is_array_x(inp_bb):
                key = 'YX'

            else:
                raise RuntimeError("CHIME feeds not polarized.")

            prod[key].append(pp)
            dist[key].append(this_dist)

        polstr = sorted(prod.keys())
        polcnt = 0
        pol_sky_id = []
        bmap = {}
        for key in polstr:
            prod[key] = np.array(prod[key])
            dist[key] = np.array(dist[key])

            p_bmap, p_ubaseline = generate_mapping(dist[key])
            nubase = p_ubaseline.shape[0]

            bmap[key] = p_bmap + polcnt

            if polcnt > 0:

                ubaseline = np.concatenate((ubaseline, p_ubaseline), axis=0)
                pol_sky_id += [key] * nubase

            else:

                ubaseline = p_ubaseline.copy()
                pol_sky_id = [key] * nubase

            polcnt += nubase
            mlog.info("%d unique baselines" % polcnt)

        nsky = ubaseline.shape[0]

        # Create arrays to hold the results
        ores = {}
        ores['freq'] = freq
        ores['input'] = data.input
        ores['time'] = timestamps
        ores['coord'] = coord
        ores['pol'] = np.array(pol_sky_id)
        ores['baseline'] = ubaseline

        # Create array to hold gain results
        ores['gain'] = np.zeros((nfreq, nant, ntime), dtype=np.complex)
        ores['sky'] = np.zeros((nfreq, nsky, ntime), dtype=np.complex)
        ores['err'] = np.zeros((nfreq, nant + nsky, ntime, 2), dtype=np.float)

        # Loop over polarisations
        for key in polstr:

            reverse_map = bmap[key]
            p_prod = prod[key]

            isort = np.argsort(reverse_map)

            p_prod = p_prod[isort]

            p_ant1 = data.prod['input_a'][p_prod]
            p_ant2 = data.prod['input_b'][p_prod]
            p_vismap = reverse_map[isort]

            # Find the redundant groups
            tmp = np.where(np.diff(p_vismap) != 0)[0]
            edges = np.zeros(2 + tmp.size, dtype='int')
            edges[0] = 0
            edges[1:-1] = tmp + 1
            edges[-1] = p_vismap.size

            kept_base = np.unique(p_vismap)

            # Determine the unique antennas
            kept_ants = np.unique(np.concatenate([p_ant1, p_ant2]))
            antmap = np.zeros(kept_ants.max() + 1, dtype='int') - 1

            p_nant = kept_ants.size
            for i in range(p_nant):
                antmap[kept_ants[i]] = i

            p_ant1_use = antmap[p_ant1].copy()
            p_ant2_use = antmap[p_ant2].copy()

            # Create matrix
            p_nvis = p_prod.size
            nred = edges.size - 1

            npar = p_nant + nred

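            # Model for each visibility V_ij in redundant group r:
            #   ln|V_ij|  = a_i + a_j + ln|S_r|    (rows of A)
            #   arg(V_ij) = p_i - p_j + arg(S_r)   (rows of B)
            # Each row therefore has +1 in the two antenna columns (B uses
            # -1 for the conjugated antenna) and +1 in the group column.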
            A = np.zeros((p_nvis, npar), dtype=np.float32)
            B = np.zeros((p_nvis, npar), dtype=np.float32)

            for kk in range(p_nant):

                flag_ant1 = p_ant1_use == kk
                if np.any(flag_ant1):
                    A[flag_ant1, kk] = 1.0
                    B[flag_ant1, kk] = 1.0

                flag_ant2 = p_ant2_use == kk
                if np.any(flag_ant2):
                    A[flag_ant2, kk] = 1.0
                    B[flag_ant2, kk] = -1.0

            for ee in range(nred):

                A[edges[ee]:edges[ee + 1], p_nant + ee] = 1.0

                B[edges[ee]:edges[ee + 1], p_nant + ee] = 1.0

            # Add equations to break degeneracy
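            # The amplitude system has one degenerate mode (an overall
            # scale); the phase system has three (an overall phase plus two
            # position-dependent tilts), hence one extra row for A and three
            # for B.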
            if config.fix_degen:
                A = np.concatenate((A, np.zeros((1, npar), dtype=np.float32)))
                A[-1, 0:p_nant] = 1.0

                B = np.concatenate((B, np.zeros((3, npar), dtype=np.float32)))
                B[-3, 0:p_nant] = 1.0
                B[-2, 0:p_nant] = feedpos[kept_ants, 0]
                B[-1, 0:p_nant] = feedpos[kept_ants, 1]

            # Loop over frequencies
            for ff, find in enumerate(freq_index):

                mlog.info("Freq %d of %d.  %0.2f MHz." %
                          (ff + 1, nfreq, freq[ff]))

                cnt = 0

                # Loop over files
                for ii, (filename, tind) in enumerate(zip(files,
                                                          time_indices)):

                    ntind = len(tind)
                    mlog.info("Processing file %s (%d time samples)" %
                              (filename, ntind))

                    # Compute noise weight
                    with h5py.File(filename, 'r') as hf:
                        wnoise = np.median(hf['flags/vis_weight'][find, :, :],
                                           axis=-1)

                    # Loop over times
                    for tt in tind:

                        t0 = time.time()

                        mlog.info("Time %d of %d.  %d index of current file." %
                                  (cnt + 1, ntime, tt))

                        # Load visibilities
                        with h5py.File(filename, 'r') as hf:

                            snap = hf['vis'][find, :, tt]
                            wsnap = wnoise * (
                                (hf['flags/vis_weight'][find, :, tt] > 0.0) &
                                (np.abs(snap) > 0.0)).astype(np.float32)

                        # Extract relevant products for this polarization
                        snap = snap[p_prod]
                        wsnap = wsnap[p_prod]

                        # Turn into amplitude and phase, avoiding NaN
                        mask = (wsnap > 0.0)

                        amp = np.where(mask, np.log(np.abs(snap)), 0.0)
                        phi = np.where(mask, np.angle(snap), 0.0)

                        # Deal with phase wrapping
                        for aa, bb in zip(edges[:-1], edges[1:]):
                            dphi = phi[aa:bb] - np.sort(phi[aa:bb])[int(
                                (bb - aa) / 2)]
                            phi[aa:bb] += (2.0 * np.pi * (dphi < -np.pi) -
                                           2.0 * np.pi * (dphi > np.pi))

                        # Add elements to fix degeneracy
                        if config.fix_degen:
                            amp = np.concatenate((amp, np.zeros(1)))
                            phi = np.concatenate((phi, np.zeros(3)))

                        # Determine noise matrix
                        inv_diagC = wsnap * np.abs(snap)**2 * 2.0

                        if config.fix_degen:
                            inv_diagC = np.concatenate((inv_diagC, np.ones(1)))

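                        # Both solves below are generalized least squares
                        # with diagonal noise covariance:
                        #   x = (M^T C^-1 M)^-1 M^T C^-1 d,
                        # where diag(C^-1) = inv_diagC.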
                        # Amplitude estimate and covariance
                        amp_param_cov = np.linalg.inv(
                            np.dot(A.T, inv_diagC[:, np.newaxis] * A))
                        amp_param = np.dot(amp_param_cov,
                                           np.dot(A.T, inv_diagC * amp))

                        # Phase estimate and covariance
                        if config.fix_degen:
                            inv_diagC = np.concatenate((inv_diagC, np.ones(2)))

                        phi_param_cov = np.linalg.inv(
                            np.dot(B.T, inv_diagC[:, np.newaxis] * B))
                        phi_param = np.dot(phi_param_cov,
                                           np.dot(B.T, inv_diagC * phi))

                        # Save to large array
                        ores['gain'][ff, kept_ants,
                                     cnt] = np.exp(amp_param[0:p_nant] +
                                                   1.0J * phi_param[0:p_nant])

                        ores['sky'][ff, kept_base,
                                    cnt] = np.exp(amp_param[p_nant:] +
                                                  1.0J * phi_param[p_nant:])

                        ores['err'][ff, kept_ants, cnt,
                                    0] = np.diag(amp_param_cov[0:p_nant,
                                                               0:p_nant])
                        ores['err'][ff, nant + kept_base, cnt,
                                    0] = np.diag(amp_param_cov[p_nant:,
                                                               p_nant:])

                        ores['err'][ff, kept_ants, cnt,
                                    1] = np.diag(phi_param_cov[0:p_nant,
                                                               0:p_nant])
                        ores['err'][ff, nant + kept_base, cnt,
                                    1] = np.diag(phi_param_cov[p_nant:,
                                                               p_nant:])

                        # Increment time counter
                        cnt += 1

                        # Print time elapsed
                        mlog.info("Took %0.1f seconds." % (time.time() - t0, ))

        # Save to pickle file
        with h5py.File(output_file, 'w') as handler:

            handler.attrs['date'] = date

            for key, val in ores.iteritems():
                handler.create_dataset(key, data=val)
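
A hypothetical entry point (the YAML path is a placeholder):

    if __name__ == '__main__':
        main(config_file='redundant_solver.yaml')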
Example #10
    def process(self, sstream, inputmap):
        """Clean the sun.

        Parameters
        ----------
        sstream : containers.SiderealStream
            Sidereal stream.
        inputmap : list
            Correlator inputs for the stream.

        Returns
        -------
        mstream : containers.SiderealStream
            Sidereal stack with sun projected out.
        """

        sstream.redistribute("freq")

        # Get array of CSDs for each sample
        ra = sstream.index_map["ra"][:]
        csd = sstream.attrs[
            "lsd"] if "lsd" in sstream.attrs else sstream.attrs["csd"]
        csd = csd + ra / 360.0

        nprod = len(sstream.index_map["prod"])

        # Get position of sun at every time sample
        times = ephemeris.csd_to_unix(csd)
        sun_pos = np.array([
            ra_dec_of(ephemeris.skyfield_wrapper.ephemeris["sun"], t)
            for t in times
        ])

        # Get hour angle, declination and elevation of the sun, in radians
        ha = 2 * np.pi * (ra / 360.0) - sun_pos[:, 0]
        dec = sun_pos[:, 1]
        el = sun_pos[:, 2]

        # Construct baseline vector for each visibility
        feed_pos = tools.get_feed_positions(inputmap)
        vis_pos = np.array([
            feed_pos[ii] - feed_pos[ij]
            for ii, ij in sstream.index_map["prod"][:]
        ])

        feed_list = [(inputmap[fi], inputmap[fj])
                     for fi, fj in sstream.index_map["prod"][:]]

        # Determine polarisation for each visibility
        pol_ind = np.full(len(feed_list), -1, dtype=int)
        for ii, (fi, fj) in enumerate(feed_list):
            if tools.is_chime(fi) and tools.is_chime(fj):
                pol_ind[ii] = 2 * tools.is_chime_y(fi) + tools.is_chime_y(fj)
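                # (encodes XX=0, XY=1, YX=2, YY=3; non-CHIME pairs stay -1)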

        # Change vis_pos for non-CHIME feeds from NaN to 0.0
        vis_pos[(pol_ind == -1), :] = 0.0

        # Initialise new container
        sscut = sstream.__class__(axes_from=sstream, attrs_from=sstream)
        sscut.redistribute("freq")

        wv = 3e2 / sstream.index_map["freq"]["centre"]  # wavelength in metres (freq in MHz)

        # Iterate over frequencies and polarisations to null out the sun
        for lfi, fi in sstream.vis[:].enumerate(0):

            # Get the baselines in wavelengths
            u = vis_pos[:, 0] / wv[fi]
            v = vis_pos[:, 1] / wv[fi]

            # Loop over ra to reduce memory usage
            for ri in range(len(ra)):

                # Copy over the visibilities and weights
                vis = sstream.vis[fi, :, ri]
                weight = sstream.weight[fi, :, ri]
                sscut.vis[fi, :, ri] = vis
                sscut.weight[fi, :, ri] = weight

                # Only process times when the sun is above the horizon
                if el[ri] > 0.0:

                    # Calculate the phase that the sun would have using the fringestop routine
                    sun_vis = tools.fringestop_phase(
                        ha[ri], np.radians(ephemeris.CHIMELATITUDE), dec[ri],
                        u, v)
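                    # (fringestop_phase gives the phasor that stops fringes
                    # at this hour angle and declination; its conjugate below
                    # is the phase the sun imprints on baseline (u, v))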

                    # Calculate the visibility vector for the sun
                    sun_vis = sun_vis.conj()

                    # Mask out the auto-correlations
                    sun_vis *= np.logical_or(u != 0.0, v != 0.0)

                    # Iterate over polarisations to do projection independently for each.
                    # This is needed because of the different beams for each pol.
                    for pol in range(4):

                        # Mask out other polarisations in the visibility vector
                        sun_vis_pol = sun_vis * (pol_ind == pol)

                        # Calculate various projections
                        vds = (vis * sun_vis_pol.conj() * weight).sum(axis=0)
                        sds = (sun_vis_pol * sun_vis_pol.conj() *
                               weight).sum(axis=0)
                        isds = tools.invert_no_zero(sds)

                        # Subtract sun contribution from visibilities and place in new array
                        sscut.vis[fi, :, ri] -= sun_vis_pol * vds * isds

        # Return the clean sidereal stream
        return sscut
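
# --- Hedged sketch (not part of the original task): the per-sample sun
# removal above in miniature. Given a model visibility vector s for the sun
# and data v with weights w, the sun is projected out by subtracting the
# weighted rank-1 component  v <- v - s * (s^H W v) / (s^H W s).
import numpy as np

def project_out(v, s, w):
    """Remove the component of v along s under the weighted inner product."""
    vds = np.sum(v * s.conj() * w)       # <s, v>_W
    sds = np.sum(s * s.conj() * w).real  # <s, s>_W
    return v - s * (vds / sds if sds != 0 else 0.0)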
Beispiel #11
0
    def process(self, sstream, inputmap, inputmask):
        """Determine calibration from a timestream.

        Parameters
        ----------
        sstream : andata.CorrData or containers.SiderealStream
            Timestream collected during the day.
        inputmap : list of :class:`CorrInput`
            A list describing the inputs as they are in the file.
        inputmask : containers.CorrInputMask
            Mask indicating which correlator inputs to use in the
            eigenvalue decomposition.

        Returns
        -------
        suntrans : containers.SunTransit
            Response to the sun.
        """

        from operator import itemgetter
        from itertools import groupby
        from .calibration import _extract_diagonal, solve_gain

        # Ensure that we are distributed over frequency
        sstream.redistribute("freq")

        # Find the local frequencies
        nfreq = sstream.vis.local_shape[0]
        sfreq = sstream.vis.local_offset[0]
        efreq = sfreq + nfreq

        # Get the local frequency axis
        freq = sstream.freq["centre"][sfreq:efreq]
        wv = 3e2 / freq  # wavelength in metres (freq in MHz)

        # Get times
        if hasattr(sstream, "time"):
            time = sstream.time
            ra = ephemeris.transit_RA(time)
        else:
            ra = sstream.index_map["ra"][:]
            csd = (sstream.attrs["lsd"]
                   if "lsd" in sstream.attrs else sstream.attrs["csd"])
            csd = csd + ra / 360.0
            time = ephemeris.csd_to_unix(csd)

        # Only examine data between sunrise and sunset
        time_flag = np.zeros(len(time), dtype=bool)
        rise = ephemeris.solar_rising(time[0] - 24.0 * 3600.0,
                                      end_time=time[-1])
        for rr in rise:
            ss = ephemeris.solar_setting(rr)[0]
            time_flag |= (time >= rr) & (time <= ss)

        if not np.any(time_flag):
            self.log.debug(
                "No daytime data between %s and %s.",
                ephemeris.unix_to_datetime(time[0]).strftime("%b %d %H:%M"),
                ephemeris.unix_to_datetime(time[-1]).strftime("%b %d %H:%M"),
            )
            return None

        # Convert boolean flag to slices
        time_index = np.where(time_flag)[0]

        time_slice = []
        ntime = 0
        for key, group in groupby(
                enumerate(time_index),
                lambda index_item: index_item[0] - index_item[1]):
            group = list(map(itemgetter(1), group))
            ngroup = len(group)
            time_slice.append(
                (slice(group[0], group[-1] + 1), slice(ntime, ntime + ngroup)))
            ntime += ngroup
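
        # (index - value is constant within a consecutive run, so e.g.
        # time_index = [2, 3, 4, 8, 9] groups into slices 2:5 and 8:10)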

        time = np.concatenate([time[slc[0]] for slc in time_slice])
        ra = np.concatenate([ra[slc[0]] for slc in time_slice])

        # Get ra, dec, alt of sun
        sun_pos = np.array([
            ra_dec_of(ephemeris.skyfield_wrapper.ephemeris["sun"], t)
            for t in time
        ])

        # Convert from ra to hour angle
        sun_pos[:, 0] = np.radians(ra) - sun_pos[:, 0]

        # Determine good inputs
        nfeed = len(inputmap)
        good_input = np.arange(
            nfeed, dtype=int)[inputmask.datasets["input_mask"][:]]

        # Use input map to figure out which are the X and Y feeds
        xfeeds = np.array([
            idx for idx, inp in enumerate(inputmap)
            if tools.is_chime_x(inp) and (idx in good_input)
        ])
        yfeeds = np.array([
            idx for idx, inp in enumerate(inputmap)
            if tools.is_chime_y(inp) and (idx in good_input)
        ])

        self.log.debug(
            "Performing sun calibration with %d/%d good feeds (%d xpol, %d ypol).",
            len(good_input),
            nfeed,
            len(xfeeds),
            len(yfeeds),
        )

        # Construct baseline vector for each visibility
        feed_pos = tools.get_feed_positions(inputmap)
        vis_pos = np.array([
            feed_pos[ii] - feed_pos[ij]
            for ii, ij in sstream.index_map["prod"][:]
        ])
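        # Non-CHIME feeds have NaN positions; zero them so those products
        # carry zero baseline length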
        vis_pos = np.where(np.isnan(vis_pos), np.zeros_like(vis_pos), vis_pos)

        u = (vis_pos[np.newaxis, :, 0] / wv[:, np.newaxis])[:, :, np.newaxis]
        v = (vis_pos[np.newaxis, :, 1] / wv[:, np.newaxis])[:, :, np.newaxis]
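
        # (u and v are baselines in wavelengths, shaped (nfreq, nprod, 1)
        # to broadcast against the time axis)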

        # Create container to hold results of fit
        suntrans = containers.SunTransit(time=time,
                                         pol_x=xfeeds,
                                         pol_y=yfeeds,
                                         axes_from=sstream)
        for key in suntrans.datasets.keys():
            suntrans.datasets[key][:] = 0.0

        # Set coordinates
        suntrans.coord[:] = sun_pos

        # Loop over time slices
        for slc_in, slc_out in time_slice:

            # Extract visibility slice
            vis_slice = sstream.vis[..., slc_in].copy()

            ha = (sun_pos[slc_out, 0])[np.newaxis, np.newaxis, :]
            dec = (sun_pos[slc_out, 1])[np.newaxis, np.newaxis, :]

            # Extract the diagonal (to be used for weighting)
            norm = (_extract_diagonal(vis_slice, axis=1).real)**0.5
            norm = tools.invert_no_zero(norm)

            # Fringestop
            if self.fringestop:
                vis_slice *= tools.fringestop_phase(
                    ha, np.radians(ephemeris.CHIMELATITUDE), dec, u, v)

            # Solve for the point source response of each set of polarisations
            ev_x, resp_x, err_resp_x = solve_gain(vis_slice,
                                                  feeds=xfeeds,
                                                  norm=norm[:, xfeeds])
            ev_y, resp_y, err_resp_y = solve_gain(vis_slice,
                                                  feeds=yfeeds,
                                                  norm=norm[:, yfeeds])

            # Save to container
            suntrans.evalue_x[..., slc_out] = ev_x
            suntrans.evalue_y[..., slc_out] = ev_y

            suntrans.response[:, xfeeds, slc_out] = resp_x
            suntrans.response[:, yfeeds, slc_out] = resp_y

            suntrans.response_error[:, xfeeds, slc_out] = err_resp_x
            suntrans.response_error[:, yfeeds, slc_out] = err_resp_y

        # If requested, fit a model to the primary beam of the sun transit
        if self.model_fit:

            # Estimate peak RA
            i_transit = np.argmin(np.abs(sun_pos[:, 0]))

            body = ephemeris.skyfield_wrapper.ephemeris["sun"]
            obs = ephemeris._get_chime()
            obs.date = ephemeris.unix_to_ephem_time(time[i_transit])
            body.compute(obs)

            peak_ra = ephemeris.peak_RA(body)
            dra = ra - peak_ra
            dra = np.abs(dra - (dra > np.pi) * 2.0 * np.pi)[np.newaxis,
                                                            np.newaxis, :]
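            # (wraps offsets greater than pi around, keeping the magnitude
            # of the separation)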

            # Estimate FWHM
            sig_x = cal_utils.guess_fwhm(freq,
                                         pol="X",
                                         dec=body.dec,
                                         sigma=True)[:, np.newaxis, np.newaxis]
            sig_y = cal_utils.guess_fwhm(freq,
                                         pol="Y",
                                         dec=body.dec,
                                         sigma=True)[:, np.newaxis, np.newaxis]

            # Only fit ra values above the specified dynamic range threshold
            fit_flag = np.zeros([nfreq, nfeed, ntime], dtype=bool)
            fit_flag[:, xfeeds, :] = dra < (self.nsig * sig_x)
            fit_flag[:, yfeeds, :] = dra < (self.nsig * sig_y)

            # Fit model for the complex response of each feed to the point source
            param, param_cov = cal_utils.fit_point_source_transit(
                ra,
                suntrans.response[:],
                suntrans.response_error[:],
                flag=fit_flag)

            # Save to container
            suntrans.add_dataset("flag")
            suntrans.flag[:] = fit_flag

            suntrans.add_dataset("parameter")
            suntrans.parameter[:] = param

            suntrans.add_dataset("parameter_cov")
            suntrans.parameter_cov[:] = param_cov

        # Update attributes
        units = "sqrt(" + sstream.vis.attrs.get("units",
                                                "correlator-units") + ")"
        suntrans.response.attrs["units"] = units
        suntrans.response_error.attrs["units"] = units

        suntrans.attrs["source"] = "Sun"

        # Return sun transit
        return suntrans