Example #1
    def is_daytime(self, key, csd):

        # Accept either a source name or a source object directly
        src = self[key] if isinstance(key, str) else key

        # 0: window entirely at night; 1: window overlaps daytime;
        # 2: solar noon also falls inside the window
        is_daytime = 0

        src_ra, src_dec = ephemeris.object_coords(
            src.body, date=ephemeris.csd_to_unix(csd), deg=True)

        # Unix times bracketing the transit window on this sidereal day
        transit_start = ephemeris.csd_to_unix(csd + (src_ra - src.window) / 360.0)
        transit_end = ephemeris.csd_to_unix(csd + (src_ra + src.window) / 360.0)

        # Find sunrises from one day before the window through its end
        solar_rise = ephemeris.solar_rising(transit_start - 24.0 * 3600.0,
                                            end_time=transit_end)

        for rr in solar_rise:

            # First sunset following this sunrise
            ss = ephemeris.solar_setting(rr)[0]

            # Shrink the daytime interval by the night-extension buffer
            rrex = rr + self._extend_night
            ssex = ss - self._extend_night

            # Does the transit window overlap this daytime interval?
            if (transit_start <= ssex) and (rrex <= transit_end):

                is_daytime += 1

                # Does solar noon also fall inside the transit window?
                tt = ephemeris.solar_transit(rr)[0]
                if (transit_start <= tt) and (tt <= transit_end):
                    is_daytime += 1

                break

        return is_daytime
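
A minimal usage sketch for the flag this returns (the tracker instance and
source name below are hypothetical, not from the source):

    # 0: transit window entirely at night
    # 1: window overlaps daytime
    # 2: solar noon also falls inside the window
    flag = tracker.is_daytime("CYG_A", csd=3000)
    if flag == 0:
        print("Night-time transit; keep for calibration.")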
Example #2
    def add_file(self, filename):

        # Make sure this file is not currently in the transit tracker
        if self.contains_file(filename):
            return

        # Read file time range
        with h5py.File(filename, 'r') as handler:
            timestamp = handler['index_map']['time']['ctime'][:]

        # Use the median time as the representative epoch for this file
        timestamp0 = np.median(timestamp)

        # Convert to right ascension
        ra = ephemeris.lsa(timestamp)
        csd = ephemeris.csd(timestamp)

        # Loop over available sources
        for name, src in self.items():

            src_ra, src_dec = ephemeris.object_coords(src.body,
                                                      date=timestamp0,
                                                      deg=True)

            src_ra = (src_ra + src.shift) % 360.0

            # Determine if any times in this file fall
            # in a window around transit of this source
            hour_angle = ra - src_ra
            hour_angle = hour_angle + 360.0 * (hour_angle < -180.0) - 360.0 * (
                hour_angle > 180.0)

            good_time = np.flatnonzero(np.abs(hour_angle) < src.window)

            if good_time.size > 0:

                # Determine the csd for the transit contained in this file
                icsd = np.unique(
                    np.floor(csd[good_time] - (hour_angle[good_time] / 360.0)))

                if icsd.size > 1:
                    raise RuntimeError("Error estimating CSD.")

                key = int(icsd[0])

                # Full hour-angle span covered by this file
                min_ha, max_ha = hour_angle.min(), hour_angle.max()

                # Add to list of files to analyze for this source
                if key in src.files:
                    src.files[key].append((filename, hour_angle))
                    src.file_span[key][0] = min(min_ha, src.file_span[key][0])
                    src.file_span[key][1] = max(max_ha, src.file_span[key][1])

                else:
                    src.files[key] = [(filename, hour_angle)]
                    src.file_span[key] = [min_ha, max_ha]
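
The branch-free wrap on hour_angle above maps differences in (-360, 360)
into [-180, 180] without a modulo. A standalone illustration of the idiom:

    import numpy as np

    ha = np.array([-350.0, -10.0, 10.0, 350.0])
    ha = ha + 360.0 * (ha < -180.0) - 360.0 * (ha > 180.0)
    # ha is now [ 10., -10.,  10., -10.]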
Example #3
    def _finalize_transit(self):
        """Concatenate grouped time streams for the currrent transit."""

        # Find where transit starts and ends
        if len(self.tstreams) == 0 or self.cur_transit is None:
            self.log.info("Did not find any transits.")
            return None
        self.log.debug(
            "Finalising transit for {}...".format(
                ephem.unix_to_datetime(self.cur_transit)
            )
        )
        all_t = np.concatenate([ts.time for ts in self.tstreams])
        start_ind = int(np.argmin(np.abs(all_t - self.start_t)))
        stop_ind = int(np.argmin(np.abs(all_t - self.end_t)))

        # Save list of filenames
        filenames = [ts.attrs["filename"] for ts in self.tstreams]

        dt = self.tstreams[0].time[1] - self.tstreams[0].time[0]
        if dt <= 0:
            self.log.warning(
                "Time step is not positive: dt={:.3f}. Skipping.".format(dt)
            )
            ts = None
        elif stop_ind - start_ind > int(self.min_span / 360.0 * SIDEREAL_DAY_SEC / dt):
            if len(self.tstreams) > 1:
                # Concatenate timestreams
                ts = tod.concatenate(self.tstreams, start=start_ind, stop=stop_ind)
            else:
                ts = self.tstreams[0]
            _, dec = ephem.object_coords(
                self.src, all_t[0], deg=True, obs=self.observer
            )
            ts.attrs["dec"] = dec
            ts.attrs["source_name"] = self.source
            ts.attrs["transit_time"] = self.cur_transit
            ts.attrs["observation_id"] = self.obs_id
            ts.attrs["tag"] = "{}_{:0>4d}_{}".format(
                self.source,
                self.obs_id,
                ephem.unix_to_datetime(self.cur_transit).strftime("%Y%m%dT%H%M%S"),
            )
            ts.attrs["archivefiles"] = filenames
        else:
            self.log.info("Transit too short. Skipping.")
            ts = None

        self.tstreams = []
        self.cur_transit = None

        return ts
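
The span threshold above converts min_span, in degrees of hour angle, into a
number of time samples via the sidereal rate. A worked example with
illustrative values (not from the source):

    # min_span = 5 deg, dt = 10 s, SIDEREAL_DAY_SEC ~ 86164 s
    # 5 / 360 * 86164 / 10 ~ 119.7 -> the transit must span > 119 samples
    nsamp_min = int(5.0 / 360.0 * 86164.0 / 10.0)  # 119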
Example #4
    def process(self, data):
        """Regrid visibility data onto a regular grid in hour angle.

        Parameters
        ----------
        data : TimeStream
            Time-ordered data.

        Returns
        -------
        new_data : SiderealStream
            The regridded data centered on the source RA.
        """

        # Redistribute if needed
        data.redistribute("freq")

        # View of data
        weight = data.weight[:].view(np.ndarray)
        vis_data = data.vis[:].view(np.ndarray)

        # Get apparent source RA, including precession effects
        ra, _ = ephem.object_coords(self.src, data.time[0], deg=True, obs=self.observer)
        # Get catalogue RA for reference
        ra_icrs, _ = ephem.object_coords(self.src, deg=True, obs=self.observer)

        # Convert input times to hour angle
        lha = unwrap_lha(self.observer.unix_to_lsa(data.time), ra)

        # Perform regridding
        success = 1
        try:
            new_grid, new_vis, ni = self._regrid(vis_data, weight, lha)
        except np.linalg.LinAlgError as e:
            self.log.error(str(e))
            success = 0
        except ValueError as e:
            self.log.error(str(e))
            success = 0
        # Check other ranks have completed
        success = mpiutil.allreduce(success)
        if success != mpiutil.size:
            self.log.warning("Regridding failed. Skipping transit.")
            return None

        # Mask out regions beyond the bounds of this transit
        grid_mask = np.ones_like(new_grid)
        grid_mask[new_grid < lha.min()] = 0.0
        grid_mask[new_grid > lha.max()] = 0.0
        new_vis *= grid_mask
        ni *= grid_mask

        # Wrap to produce MPIArray
        if data.distributed:
            new_vis = mpiarray.MPIArray.wrap(new_vis, axis=data.vis.distributed_axis)
            ni = mpiarray.MPIArray.wrap(ni, axis=data.vis.distributed_axis)

        # Create new container for output
        ra_grid = (new_grid + ra) % 360.0
        new_data = SiderealStream(
            axes_from=data, attrs_from=data, ra=ra_grid, comm=data.comm
        )
        new_data.redistribute("freq")
        new_data.vis[:] = new_vis
        new_data.weight[:] = ni
        new_data.attrs["cirs_ra"] = ra
        new_data.attrs["icrs_ra"] = ra_icrs

        return new_data
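
unwrap_lha is not shown here; a rough sketch of the behaviour the call above
relies on, offered purely as an assumption (the real helper may differ):

    import numpy as np

    def unwrap_lha(lsa, src_ra):
        # Hypothetical: hour angle of the source in degrees, wrapped so the
        # grid runs continuously through transit rather than jumping at 0/360
        ha = (lsa - src_ra) % 360.0
        return np.where(ha > 180.0, ha - 360.0, ha)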
Example #5
def offline_point_source_calibration(file_list,
                                     source,
                                     inputmap=None,
                                     start=None,
                                     stop=None,
                                     physical_freq=None,
                                     tcorr=None,
                                     logging_params=DEFAULT_LOGGING,
                                     **kwargs):
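    """Estimate per-input complex gains from a point-source transit.

    Fits a model to the eigenvector response of each correlator input over
    the transit of `source` in `file_list`, and returns a dictionary of
    gains, weights, fit parameters, and flags.
    """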
    # Load config
    config = DEFAULTS.deepcopy()
    config.merge(NameSpace(kwargs))

    # Setup logging
    log.setup_logging(logging_params)
    mlog = log.get_logger(__name__)

    mlog.info("ephemeris file: %s" % ephemeris.__file__)

    # Set the model to use
    fitter_function = utils.fit_point_source_transit
    model_function = utils.model_point_source_transit

    farg = inspect.getfullargspec(fitter_function)
    defaults = {
        key: val
        for key, val in zip(farg.args[-len(farg.defaults):], farg.defaults)
    }
    poly_deg_amp = kwargs.get('poly_deg_amp', defaults['poly_deg_amp'])
    poly_deg_phi = kwargs.get('poly_deg_phi', defaults['poly_deg_phi'])
    poly_type = kwargs.get('poly_type', defaults['poly_type'])

    param_name = ([
        '%s_poly_amp_coeff%d' % (poly_type, cc)
        for cc in range(poly_deg_amp + 1)
    ] + [
        '%s_poly_phi_coeff%d' % (poly_type, cc)
        for cc in range(poly_deg_phi + 1)
    ])
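    # e.g. poly_type='standard' with poly_deg_amp = poly_deg_phi = 1 gives
    # ['standard_poly_amp_coeff0', 'standard_poly_amp_coeff1',
    #  'standard_poly_phi_coeff0', 'standard_poly_phi_coeff1']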

    model_kwargs = [('poly_deg_amp', poly_deg_amp),
                    ('poly_deg_phi', poly_deg_phi), ('poly_type', poly_type)]
    model_name = '.'.join(
        [getattr(model_function, key) for key in ['__module__', '__name__']])

    tval = {}

    # Set where to evaluate gain
    ha_eval_str = ['raw_transit']

    if config.multi_sample:
        ha_eval_str += ['transit', 'peak']
        ha_eval = [0.0, None]
        fitslc = slice(1, 3)

    ind_eval = ha_eval_str.index(config.evaluate_gain_at)

    # Determine dimensions
    direction = ['amp', 'phi']
    nparam = len(param_name)
    ngain = len(ha_eval_str)
    ndir = len(direction)

    # Determine frequencies
    data = andata.CorrData.from_acq_h5(file_list,
                                       datasets=(),
                                       start=start,
                                       stop=stop)
    freq = data.freq

    if physical_freq is not None:
        index_freq = np.array(
            [np.argmin(np.abs(ff - freq)) for ff in physical_freq])
        freq_sel = utils.convert_to_slice(index_freq)
        freq = freq[index_freq]
    else:
        index_freq = np.arange(freq.size)
        freq_sel = None

    nfreq = freq.size

    # Compute flux of source
    inv_rt_flux_density = tools.invert_no_zero(
        np.sqrt(FluxCatalog[source].predict_flux(freq)))

    # Read in the eigenvalues for all frequencies
    data = andata.CorrData.from_acq_h5(file_list,
                                       datasets=['erms', 'eval'],
                                       freq_sel=freq_sel,
                                       start=start,
                                       stop=stop)

    # Determine source coordinates
    this_csd = np.floor(ephemeris.unix_to_csd(np.median(data.time)))
    timestamp0 = ephemeris.transit_times(FluxCatalog[source].skyfield,
                                         ephemeris.csd_to_unix(this_csd))[0]
    src_ra, src_dec = ephemeris.object_coords(FluxCatalog[source].skyfield,
                                              date=timestamp0,
                                              deg=True)

    ra = ephemeris.lsa(data.time)
    ha = ra - src_ra
    ha = ha - (ha > 180.0) * 360.0 + (ha < -180.0) * 360.0
    ha = np.radians(ha)

    itrans = np.argmin(np.abs(ha))

    window = 0.75 * np.max(np.abs(ha))

    off_source = np.abs(ha) > window

    mlog.info("CSD %d" % this_csd)
    mlog.info("Hour angle at transit (%d of %d):  %0.2f deg   " %
              (itrans, len(ha), np.degrees(ha[itrans])))
    mlog.info("Hour angle off source: %0.2f deg" %
              np.median(np.abs(np.degrees(ha[off_source]))))

    src_dec = np.radians(src_dec)
    lat = np.radians(ephemeris.CHIMELATITUDE)

    # Determine division of frequencies
    ninput = data.ninput
    ntime = data.ntime
    nblock_freq = int(np.ceil(nfreq / float(config.nfreq_per_block)))

    # Determine bad inputs
    eps = 10.0 * np.finfo(data['erms'].dtype).eps
    good_freq = np.flatnonzero(np.all(data['erms'][:] > eps, axis=-1))
    ind_sub_freq = good_freq[slice(0, good_freq.size,
                                   max(int(good_freq.size / 10), 1))]

    tmp_data = andata.CorrData.from_acq_h5(file_list,
                                           datasets=['evec'],
                                           freq_sel=ind_sub_freq,
                                           start=start,
                                           stop=stop)
    eps = 10.0 * np.finfo(tmp_data['evec'].dtype).eps
    bad_input = np.flatnonzero(
        np.all(np.abs(tmp_data['evec'][:, 0]) < eps, axis=(0, 2)))

    input_axis = tmp_data.input.copy()

    del tmp_data

    # Query layout database for correlator inputs
    if inputmap is None:
        inputmap = tools.get_correlator_inputs(
            datetime.datetime.utcfromtimestamp(data.time[itrans]),
            correlator='chime')

    inputmap = tools.reorder_correlator_inputs(input_axis, inputmap)

    tools.change_chime_location(rotation=config.telescope_rotation)

    # Determine x and y pol index
    xfeeds = np.array([
        idf for idf, inp in enumerate(inputmap)
        if (idf not in bad_input) and tools.is_array_x(inp)
    ])
    yfeeds = np.array([
        idf for idf, inp in enumerate(inputmap)
        if (idf not in bad_input) and tools.is_array_y(inp)
    ])

    nfeed = xfeeds.size + yfeeds.size

    pol = [yfeeds, xfeeds]
    polstr = ['Y', 'X']
    npol = len(pol)

    neigen = min(max(npol, config.neigen), data['eval'].shape[1])

    phase_ref = config.phase_reference_index
    phase_ref_by_pol = [
        pol[pp].tolist().index(phase_ref[pp]) for pp in range(npol)
    ]

    # Calculate dynamic range
    eval0_off_source = np.median(data['eval'][:, 0, off_source], axis=-1)

    dyn = data['eval'][:, 1, :] * tools.invert_no_zero(
        eval0_off_source[:, np.newaxis])

    # Determine frequencies to mask
    not_rfi = np.ones((nfreq, 1), dtype=bool)
    if config.mask_rfi is not None:
        for frng in config.mask_rfi:
            not_rfi[:, 0] &= ((freq < frng[0]) | (freq > frng[1]))

    mlog.info("%0.1f percent of frequencies available after masking RFI." %
              (100.0 * np.sum(not_rfi, dtype=np.float32) / float(nfreq), ))

    if source in config.dyn_rng_threshold:
        dyn_rng_threshold = config.dyn_rng_threshold[source]
    else:
        dyn_rng_threshold = config.dyn_rng_threshold.default

    mlog.info("Dynamic range threshold set to %0.1f." % dyn_rng_threshold)

    dyn_flg = dyn > dyn_rng_threshold

    # Calculate fit flag
    fit_flag = np.zeros((nfreq, npol, ntime), dtype=bool)
    for pp in range(npol):

        mlog.info("Dynamic Range Nsample, Pol %d:  %s" % (pp, ','.join([
            "%d" % xx for xx in np.percentile(np.sum(dyn_flg, axis=-1),
                                              [25, 50, 75, 100])
        ])))

        if config.nsigma1 is None:
            fit_flag[:, pp, :] = dyn_flg & not_rfi

        else:

            fit_window = config.nsigma1 * np.radians(
                utils.get_window(freq, pol=polstr[pp], dec=src_dec, deg=True))

            win_flg = np.abs(ha)[np.newaxis, :] <= fit_window[:, np.newaxis]

            fit_flag[:, pp, :] = (dyn_flg & win_flg & not_rfi)

    # Calculate base error
    base_err = data['erms'][:, np.newaxis, :]

    # Check for sign flips
    ref_resp = andata.CorrData.from_acq_h5(file_list,
                                           datasets=['evec'],
                                           input_sel=config.eigen_reference,
                                           freq_sel=freq_sel,
                                           start=start,
                                           stop=stop)['evec'][:, 0:neigen,
                                                              0, :]

    # Map a negative real part to -1 and a non-negative real part to +1
    sign0 = 1.0 - 2.0 * (ref_resp.real < 0.0)

    # Check that we have the correct reference feed
    if np.any(np.abs(ref_resp.imag) > 0.0):
        raise ValueError("Reference feed %d is incorrect." % config.eigen_reference)

    del ref_resp

    # Save index_map
    results = {}
    results['model'] = model_name
    results['param'] = param_name
    results['freq'] = data.index_map['freq'][:]
    results['input'] = input_axis
    results['eval'] = ha_eval_str
    results['dir'] = direction

    for key, val in model_kwargs:
        results[key] = val

    # Initialize numpy arrays to hold results
    if config.return_response:

        results['response'] = np.zeros((nfreq, ninput, ntime),
                                       dtype=np.complex64)
        results['response_err'] = np.zeros((nfreq, ninput, ntime),
                                           dtype=np.float32)
        results['fit_flag'] = fit_flag
        results['ha_axis'] = ha
        results['ra'] = ra

    else:

        results['gain_eval'] = np.zeros((nfreq, ninput, ngain),
                                        dtype=np.complex64)
        results['weight_eval'] = np.zeros((nfreq, ninput, ngain),
                                          dtype=np.float32)
        results['frac_gain_err'] = np.zeros((nfreq, ninput, ngain, ndir),
                                            dtype=np.float32)

        results['parameter'] = np.zeros((nfreq, ninput, nparam),
                                        dtype=np.float32)
        results['parameter_err'] = np.zeros((nfreq, ninput, nparam),
                                            dtype=np.float32)

        results['index_eval'] = np.full((nfreq, ninput), -1, dtype=np.int8)
        results['gain'] = np.zeros((nfreq, ninput), dtype=np.complex64)
        results['weight'] = np.zeros((nfreq, ninput), dtype=np.float32)

        results['ndof'] = np.zeros((nfreq, ninput, ndir), dtype=np.float32)
        results['chisq'] = np.zeros((nfreq, ninput, ndir), dtype=np.float32)

        results['timing'] = np.zeros((nfreq, ninput), dtype=np.complex64)

    # Initialize metric like variables
    results['runtime'] = np.zeros((nblock_freq, 2), dtype=np.float64)

    # Compute distances
    dist = tools.get_feed_positions(inputmap)
    for pp, feeds in enumerate(pol):
        dist[feeds, :] -= dist[phase_ref[pp], np.newaxis, :]

    # Loop over frequency blocks
    for gg in range(nblock_freq):

        mlog.info("Frequency block %d of %d." % (gg, nblock_freq))

        fstart = gg * config.nfreq_per_block
        fstop = min((gg + 1) * config.nfreq_per_block, nfreq)
        findex = np.arange(fstart, fstop)
        ngroup = findex.size

        freq_sel = utils.convert_to_slice(index_freq[findex])

        timeit_start_gg = time.time()

        # Select the time range to load for this frequency block
        if config.return_response:
            gstart = start
            gstop = stop

            tslc = slice(0, ntime)

        else:
            good_times = np.flatnonzero(np.any(fit_flag[findex], axis=(0, 1)))

            if good_times.size == 0:
                continue

            gstart = int(np.min(good_times))
            gstop = int(np.max(good_times)) + 1

            tslc = slice(gstart, gstop)

            gstart += start
            gstop += start

        hag = ha[tslc]
        itrans = np.argmin(np.abs(hag))

        # Load eigenvectors.
        nudata = andata.CorrData.from_acq_h5(
            file_list,
            datasets=['evec', 'vis', 'flags/vis_weight'],
            apply_gain=False,
            freq_sel=freq_sel,
            start=gstart,
            stop=gstop)

        # Save time to load data
        results['runtime'][gg, 0] = time.time() - timeit_start_gg
        timeit_start_gg = time.time()

        mlog.info("Time to load (per frequency):  %0.3f sec" %
                  (results['runtime'][gg, 0] / ngroup, ))

        # Loop over polarizations
        for pp, feeds in enumerate(pol):

            # Get timing correction
            if tcorr is not None:
                tgain = tcorr.get_gain(nudata.freq, nudata.input[feeds],
                                       nudata.time)
                tgain *= tgain[:, phase_ref_by_pol[pp], np.newaxis, :].conj()

                tgain_transit = tgain[:, :, itrans].copy()
                tgain *= tgain_transit[:, :, np.newaxis].conj()

            # Create the polarization masking vector
            P = np.zeros((1, ninput, 1), dtype=np.float64)
            P[:, feeds, :] = 1.0

            # Loop over frequencies
            for gff, ff in enumerate(findex):

                flg = fit_flag[ff, pp, tslc]

                if (2 * int(np.sum(flg)) < (nparam + 1)
                        and not config.return_response):
                    continue

                # Normalize by eigenvalue and correct for pi phase flips in process.
                resp = (nudata['evec'][gff, 0:neigen, :, :] *
                        np.sqrt(data['eval'][ff, 0:neigen, np.newaxis, tslc]) *
                        sign0[ff, :, np.newaxis, tslc])

                # Rotate to single-pol response
                # Move time to first axis for the matrix multiplication
                invL = tools.invert_no_zero(
                    np.rollaxis(data['eval'][ff, 0:neigen, np.newaxis, tslc],
                                -1, 0))

                UT = np.rollaxis(resp, -1, 0)
                U = np.swapaxes(UT, -1, -2)

                mu, vp = np.linalg.eigh(np.matmul(UT.conj(), P * U))

                rsign0 = (1.0 - 2.0 * (vp[:, 0, np.newaxis, :].real < 0.0))

                resp = mu[:, np.newaxis, :] * np.matmul(U, rsign0 * vp * invL)

                # Extract feeds of this pol
                # Transpose so that time is back to last axis
                resp = resp[:, feeds, -1].T

                # Compute error on response
                dataflg = ((nudata.weight[gff, feeds, :] > 0.0)
                           & np.isfinite(nudata.weight[gff, feeds, :])).astype(
                               np.float32)

                resp_err = dataflg * base_err[ff, :, tslc] * np.sqrt(
                    nudata.vis[gff, feeds, :].real) * tools.invert_no_zero(
                        np.sqrt(mu[np.newaxis, :, -1]))

                # Reference to specific input
                resp *= np.exp(
                    -1.0J *
                    np.angle(resp[phase_ref_by_pol[pp], np.newaxis, :]))

                # Apply timing correction
                if tcorr is not None:
                    resp *= tgain[gff]

                    results['timing'][ff, feeds] = tgain_transit[gff]

                # Fringestop
                lmbda = scipy.constants.c * 1e-6 / nudata.freq[gff]

                resp *= tools.fringestop_phase(
                    hag[np.newaxis, :], lat, src_dec,
                    dist[feeds, 0, np.newaxis] / lmbda,
                    dist[feeds, 1, np.newaxis] / lmbda)

                # Normalize by source flux
                resp *= inv_rt_flux_density[ff]
                resp_err *= inv_rt_flux_density[ff]

                # If requested, reference phase to the median value
                if config.med_phase_ref:
                    phi0 = np.angle(resp[:, itrans, np.newaxis])
                    resp *= np.exp(-1.0J * phi0)
                    resp *= np.exp(
                        -1.0J *
                        np.median(np.angle(resp), axis=0, keepdims=True))
                    resp *= np.exp(1.0J * phi0)

                # Check if return_response flag was set by user
                if not config.return_response:

                    if config.multi_sample:
                        moving_window = config.nsigma2 and config.nsigma2 * np.radians(
                            utils.get_window(nudata.freq[gff],
                                             pol=polstr[pp],
                                             dec=src_dec,
                                             deg=True))

                    # Loop over inputs
                    for pii, ii in enumerate(feeds):

                        is_good = (flg & (np.abs(resp[pii, :]) > 0.0)
                                   & (resp_err[pii, :] > 0.0))

                        # Set the initial gains based on raw response at transit
                        if is_good[itrans]:
                            results['gain_eval'][ff, ii, 0] = (
                                tools.invert_no_zero(resp[pii, itrans]))
                            results['frac_gain_err'][ff, ii, 0, :] = (
                                resp_err[pii, itrans] *
                                tools.invert_no_zero(np.abs(resp[pii, itrans])))
                            results['weight_eval'][ff, ii, 0] = 0.5 * (
                                np.abs(resp[pii, itrans])**2 *
                                tools.invert_no_zero(resp_err[pii, itrans]))**2

                            results['index_eval'][ff, ii] = 0
                            results['gain'][ff, ii] = results['gain_eval'][ff, ii, 0]
                            results['weight'][ff, ii] = (
                                results['weight_eval'][ff, ii, 0])

                        # Exit if not performing multi time sample fit
                        if not config.multi_sample:
                            continue

                        if (2 * int(np.sum(is_good))) < (nparam + 1):
                            continue

                        try:
                            param, param_err, gain, gain_err, ndof, chisq, tval = fitter_function(
                                hag[is_good],
                                resp[pii, is_good],
                                resp_err[pii, is_good],
                                ha_eval,
                                window=moving_window,
                                tval=tval,
                                **config.fit)
                        except Exception as rex:
                            if config.verbose:
                                mlog.info(
                                    "Frequency %0.2f, Feed %d failed with error: %s"
                                    % (nudata.freq[gff], ii, rex))
                            continue

                        # Check for nan
                        wfit = (np.abs(gain) *
                                tools.invert_no_zero(np.abs(gain_err)))**2
                        if np.any(~np.isfinite(np.abs(gain))) or np.any(
                                ~np.isfinite(wfit)):
                            continue

                        # Save to results using the convention that you should
                        # *multiply* the visibilities by the gains
                        results['gain_eval'][ff, ii, fitslc] = (
                            tools.invert_no_zero(gain))
                        results['frac_gain_err'][ff, ii, fitslc, 0] = gain_err.real
                        results['frac_gain_err'][ff, ii, fitslc, 1] = gain_err.imag
                        results['weight_eval'][ff, ii, fitslc] = wfit

                        results['parameter'][ff, ii, :] = param
                        results['parameter_err'][ff, ii, :] = param_err

                        results['ndof'][ff, ii, :] = ndof
                        results['chisq'][ff, ii, :] = chisq

                        # Check if the fit was successful and update the gain
                        # evaluation index appropriately
                        if np.all((chisq / ndof.astype(np.float32))
                                  <= config.chisq_per_dof_threshold):
                            results['index_eval'][ff, ii] = ind_eval
                            results['gain'][ff, ii] = (
                                results['gain_eval'][ff, ii, ind_eval])
                            results['weight'][ff, ii] = (
                                results['weight_eval'][ff, ii, ind_eval])

                else:

                    # Return response only (do not fit model)
                    results['response'][ff, feeds, :] = resp
                    results['response_err'][ff, feeds, :] = resp_err

        # Save time to fit data
        results['runtime'][gg, 1] = time.time() - timeit_start_gg

        mlog.info("Time to fit (per frequency):  %0.3f sec" %
                  (results['runtime'][gg, 1] / ngroup, ))

        # Clean up
        del nudata
        gc.collect()

    # Print total run time
    mlog.info("TOTAL TIME TO LOAD: %0.3f min" %
              (np.sum(results['runtime'][:, 0]) / 60.0, ))
    mlog.info("TOTAL TIME TO FIT:  %0.3f min" %
              (np.sum(results['runtime'][:, 1]) / 60.0, ))

    # Set the best estimate of the gain
    if not config.return_response:

        flag = results['index_eval'] >= 0
        gain = results['gain']

        # Compute amplitude
        amp = np.abs(gain)

        # Hard cutoffs on the amplitude
        med_amp = np.median(amp[flag])
        min_amp = med_amp * config.min_amp_scale_factor
        max_amp = med_amp * config.max_amp_scale_factor

        flag &= ((amp >= min_amp) & (amp <= max_amp))

        # Flag outliers in amplitude for each frequency
        for pp, feeds in enumerate(pol):

            med_amp_by_pol = np.zeros(nfreq, dtype=np.float32)
            sig_amp_by_pol = np.zeros(nfreq, dtype=np.float32)

            for ff in range(nfreq):

                this_flag = flag[ff, feeds]

                if np.any(this_flag):

                    med, slow, shigh = utils.estimate_directional_scale(
                        amp[ff, feeds[this_flag]])
                    lower = med - config.nsigma_outlier * slow
                    upper = med + config.nsigma_outlier * shigh

                    flag[ff, feeds] &= ((amp[ff, feeds] >= lower) &
                                        (amp[ff, feeds] <= upper))

                    med_amp_by_pol[ff] = med
                    sig_amp_by_pol[ff] = 0.5 * (shigh - slow) / np.sqrt(
                        np.sum(this_flag, dtype=np.float32))

            if config.nsigma_med_outlier:

                med_flag = med_amp_by_pol > 0.0

                not_outlier = flag_outliers(med_amp_by_pol,
                                            med_flag,
                                            window=config.window_med_outlier,
                                            nsigma=config.nsigma_med_outlier)
                flag[:, feeds] &= not_outlier[:, np.newaxis]

                mlog.info("Pol %s:  %d frequencies are outliers." %
                          (polstr[pp],
                           np.sum(~not_outlier & med_flag, dtype=np.int)))

        # Determine bad frequencies
        flag_freq = (np.sum(flag, axis=1, dtype=np.float32) /
                     float(ninput)) > config.threshold_good_freq
        good_freq = np.flatnonzero(flag_freq)

        # Determine bad inputs
        fraction_good = np.sum(flag[good_freq, :], axis=0,
                               dtype=np.float32) / float(good_freq.size)
        flag_input = fraction_good > config.threshold_good_input

        # Finalize flag
        flag &= (flag_freq[:, np.newaxis] & flag_input[np.newaxis, :])

        # Interpolate gains
        interp_gain, interp_weight = interpolate_gain(
            freq,
            gain,
            results['weight'],
            flag=flag,
            length_scale=config.interpolation_length_scale,
            mlog=mlog)
        # Save gains to object
        results['flag'] = flag
        results['gain'] = interp_gain
        results['weight'] = interp_weight

    # Return results
    return results
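
A minimal call sketch (file names and keyword values below are illustrative,
not from the source):

    files = ['transit_part1.h5', 'transit_part2.h5']  # hypothetical archive files

    results = offline_point_source_calibration(files, 'CYG_A',
                                               physical_freq=[600.0, 700.0])

    gain = results['gain']      # (nfreq, ninput) complex gains
    weight = results['weight']  # corresponding weights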