def search_extended_timing_solutions(timing_files, timestamp):

    # Load the timing correction
    nfiles = len(timing_files)

    # Use float64: unix timestamps lose precision in float32
    tstart = np.zeros(nfiles, dtype=np.float64)
    tstop = np.zeros(nfiles, dtype=np.float64)

    all_tcorr = []
    for ff, filename in enumerate(timing_files):
        kwargs = {}
        with h5py.File(filename, 'r') as handler:
            for key in ['tau', 'avg_phase', 'noise_source', 'time']:
                kwargs[key] = handler[key][:]

        tcorr = timing.TimingCorrection(**kwargs)

        all_tcorr.append(tcorr)
        tstart[ff] = tcorr.time[0]
        tstop[ff] = tcorr.time[-1]

    # Map timestamp to a timing correction object
    imatch = np.flatnonzero((timestamp >= tstart) & (timestamp <= tstop))

    if imatch.size > 1:
        raise ValueError("Timing corrections overlap!")
    elif imatch.size < 1:
        raise ValueError(
            "No timing correction for transit on %s (CSD %d)" %
            (ephemeris.unix_to_datetime(timestamp).strftime("%Y-%m-%d"),
             ephemeris.unix_to_csd(timestamp)))

    return all_tcorr[imatch[0]]
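# A minimal usage sketch of the function above. The directory and timestamp
# are hypothetical; this only assumes the `glob` module and the timing-file
# layout already implied by the function itself.
def _example_search_timing():
    import glob
    timing_files = sorted(glob.glob("/path/to/timing/*_timing.h5"))  # hypothetical path
    transit_time = 1521504000.0  # hypothetical unix time of a transit
    tcorr = search_extended_timing_solutions(timing_files, transit_time)
    return tcorr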
def _flags_mask(self, index_map_ra):
    if self._cache_flags:
        flag_time_spans = get_flags_cached(self.flags, self._cache_reset_time)
    else:
        flag_time_spans = get_flags(
            self.flags,
            csd_to_unix(self.lsd.lsd),
            csd_to_unix(self.lsd.lsd + 1),
        )
    csd_arr = self.lsd.lsd + index_map_ra / 360.0
    flag_mask = np.zeros_like(csd_arr, dtype=bool)
    for type_, ca, cb in flag_time_spans:
        flag_mask[(csd_arr > unix_to_csd(ca)) & (csd_arr < unix_to_csd(cb))] = True
    return flag_mask[:, np.newaxis]
def csds_in_range(start, end, step=1):
    """Get the CSDs within a time range.

    The start and end parameters must either be strings of the form "CSD\\d+"
    (i.e. CSD followed by an int), which specifies an exact CSD start, or a
    form that `ephemeris.ensure_unix` understands.

    Parameters
    ----------
    start : str or parseable to datetime
        Start of interval.
    end : str or parseable to datetime
        End of interval. If `None` use now. Note that for CSD intervals the
        end is *inclusive* (unlike a `range`).
    step : int
        Step (in CSDs) between the returned days.

    Returns
    -------
    csds : list of ints
    """
    if end is None:
        end = datetime.datetime.utcnow()

    if isinstance(start, str) and start.startswith("CSD"):
        start_csd = int(start[3:])
    else:
        start_csd = ephemeris.unix_to_csd(ephemeris.ensure_unix(start))
    start_csd = math.floor(start_csd)

    if isinstance(end, str) and end.startswith("CSD"):
        end_csd = int(end[3:])
    else:
        end_csd = ephemeris.unix_to_csd(ephemeris.ensure_unix(end))
    end_csd = math.ceil(end_csd)

    csds = list(range(start_csd, end_csd + 1, step))
    return csds
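# Short usage sketch for csds_in_range (CSD values hypothetical). Passing
# explicit "CSD..." strings avoids any ephemeris lookup.
def _example_csds_in_range():
    # Every second CSD between two explicit sidereal days (inclusive).
    days = csds_in_range("CSD3000", "CSD3010", step=2)
    assert days == [3000, 3002, 3004, 3006, 3008, 3010]
    # Anything `ephemeris.ensure_unix` understands also works:
    return csds_in_range("2018-08-01", None)  # from this date up to now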
def from_date(cls, date: datetime.date):
    # Note: time.mktime interprets the date in the *local* timezone.
    unix = time.mktime(date.timetuple())
    lsd = int(unix_to_csd(unix))
    day = cls(lsd, date)
    return day
def offline_point_source_calibration(file_list, source, inputmap=None, start=None,
                                     stop=None, physical_freq=None, tcorr=None,
                                     logging_params=DEFAULT_LOGGING, **kwargs):
    # Load config
    config = DEFAULTS.deepcopy()
    config.merge(NameSpace(kwargs))

    # Setup logging
    log.setup_logging(logging_params)
    mlog = log.get_logger(__name__)
    mlog.info("ephemeris file: %s" % ephemeris.__file__)

    # Set the model to use
    fitter_function = utils.fit_point_source_transit
    model_function = utils.model_point_source_transit

    farg = inspect.getfullargspec(fitter_function)
    defaults = {
        key: val
        for key, val in zip(farg.args[-len(farg.defaults):], farg.defaults)
    }
    poly_deg_amp = kwargs.get('poly_deg_amp', defaults['poly_deg_amp'])
    poly_deg_phi = kwargs.get('poly_deg_phi', defaults['poly_deg_phi'])
    poly_type = kwargs.get('poly_type', defaults['poly_type'])

    param_name = ([
        '%s_poly_amp_coeff%d' % (poly_type, cc)
        for cc in range(poly_deg_amp + 1)
    ] + [
        '%s_poly_phi_coeff%d' % (poly_type, cc)
        for cc in range(poly_deg_phi + 1)
    ])

    model_kwargs = [('poly_deg_amp', poly_deg_amp),
                    ('poly_deg_phi', poly_deg_phi),
                    ('poly_type', poly_type)]
    model_name = '.'.join(
        [getattr(model_function, key) for key in ['__module__', '__name__']])

    tval = {}

    # Set where to evaluate gain
    ha_eval_str = ['raw_transit']

    if config.multi_sample:
        ha_eval_str += ['transit', 'peak']
        ha_eval = [0.0, None]
        fitslc = slice(1, 3)

    ind_eval = ha_eval_str.index(config.evaluate_gain_at)

    # Determine dimensions
    direction = ['amp', 'phi']
    nparam = len(param_name)
    ngain = len(ha_eval_str)
    ndir = len(direction)

    # Determine frequencies
    data = andata.CorrData.from_acq_h5(file_list, datasets=(),
                                       start=start, stop=stop)
    freq = data.freq

    if physical_freq is not None:
        index_freq = np.array(
            [np.argmin(np.abs(ff - freq)) for ff in physical_freq])
        freq_sel = utils.convert_to_slice(index_freq)
        freq = freq[index_freq]
    else:
        index_freq = np.arange(freq.size)
        freq_sel = None

    nfreq = freq.size

    # Compute flux of source
    inv_rt_flux_density = tools.invert_no_zero(
        np.sqrt(FluxCatalog[source].predict_flux(freq)))

    # Read in the eigenvalues for all frequencies
    data = andata.CorrData.from_acq_h5(file_list,
                                       datasets=['erms', 'eval'],
                                       freq_sel=freq_sel,
                                       start=start,
                                       stop=stop)

    # Determine source coordinates
    this_csd = np.floor(ephemeris.unix_to_csd(np.median(data.time)))
    timestamp0 = ephemeris.transit_times(FluxCatalog[source].skyfield,
                                         ephemeris.csd_to_unix(this_csd))[0]
    src_ra, src_dec = ephemeris.object_coords(FluxCatalog[source].skyfield,
                                              date=timestamp0,
                                              deg=True)

    ra = ephemeris.lsa(data.time)
    ha = ra - src_ra
    ha = ha - (ha > 180.0) * 360.0 + (ha < -180.0) * 360.0
    ha = np.radians(ha)

    itrans = np.argmin(np.abs(ha))

    window = 0.75 * np.max(np.abs(ha))
    off_source = np.abs(ha) > window

    mlog.info("CSD %d" % this_csd)
    mlog.info("Hour angle at transit (%d of %d): %0.2f deg " %
              (itrans, len(ha), np.degrees(ha[itrans])))
    mlog.info("Hour angle off source: %0.2f deg" %
              np.median(np.abs(np.degrees(ha[off_source]))))

    src_dec = np.radians(src_dec)
    lat = np.radians(ephemeris.CHIMELATITUDE)

    # Determine division of frequencies
    ninput = data.ninput
    ntime = data.ntime
    nblock_freq = int(np.ceil(nfreq / float(config.nfreq_per_block)))

    # Determine bad inputs
    eps = 10.0 * np.finfo(data['erms'].dtype).eps
    good_freq = np.flatnonzero(np.all(data['erms'][:] > eps, axis=-1))
    ind_sub_freq = good_freq[slice(0, good_freq.size,
                                   max(int(good_freq.size / 10), 1))]

    tmp_data = andata.CorrData.from_acq_h5(file_list,
                                           datasets=['evec'],
                                           freq_sel=ind_sub_freq,
                                           start=start,
                                           stop=stop)
    eps = 10.0 * np.finfo(tmp_data['evec'].dtype).eps
    bad_input = np.flatnonzero(
        np.all(np.abs(tmp_data['evec'][:, 0]) < eps, axis=(0, 2)))
    input_axis = tmp_data.input.copy()
    del tmp_data

    # Query layout database for correlator inputs
    if inputmap is None:
        inputmap = tools.get_correlator_inputs(
            datetime.datetime.utcfromtimestamp(data.time[itrans]),
            correlator='chime')

    inputmap = tools.reorder_correlator_inputs(input_axis, inputmap)

    tools.change_chime_location(rotation=config.telescope_rotation)

    # Determine x and y pol index
    xfeeds = np.array([
        idf for idf, inp in enumerate(inputmap)
        if (idf not in bad_input) and tools.is_array_x(inp)
    ])
    yfeeds = np.array([
        idf for idf, inp in enumerate(inputmap)
        if (idf not in bad_input) and tools.is_array_y(inp)
    ])

    nfeed = xfeeds.size + yfeeds.size

    pol = [yfeeds, xfeeds]
    polstr = ['Y', 'X']
    npol = len(pol)

    neigen = min(max(npol, config.neigen), data['eval'].shape[1])

    phase_ref = config.phase_reference_index
    phase_ref_by_pol = [
        pol[pp].tolist().index(phase_ref[pp]) for pp in range(npol)
    ]

    # Calculate dynamic range
    eval0_off_source = np.median(data['eval'][:, 0, off_source], axis=-1)

    dyn = data['eval'][:, 1, :] * tools.invert_no_zero(
        eval0_off_source[:, np.newaxis])

    # Determine frequencies to mask
    not_rfi = np.ones((nfreq, 1), dtype=bool)
    if config.mask_rfi is not None:
        for frng in config.mask_rfi:
            not_rfi[:, 0] &= ((freq < frng[0]) | (freq > frng[1]))

    mlog.info("%0.1f percent of frequencies available after masking RFI." %
              (100.0 * np.sum(not_rfi, dtype=np.float32) / float(nfreq), ))

    # dyn_flg = utils.contiguous_flag(dyn > config.dyn_rng_threshold, centre=itrans)
    if source in config.dyn_rng_threshold:
        dyn_rng_threshold = config.dyn_rng_threshold[source]
    else:
        dyn_rng_threshold = config.dyn_rng_threshold.default

    mlog.info("Dynamic range threshold set to %0.1f." % dyn_rng_threshold)

    dyn_flg = dyn > dyn_rng_threshold

    # Calculate fit flag
    fit_flag = np.zeros((nfreq, npol, ntime), dtype=bool)
    for pp in range(npol):

        mlog.info("Dynamic Range Nsample, Pol %d: %s" % (pp, ','.join([
            "%d" % xx for xx in np.percentile(np.sum(dyn_flg, axis=-1),
                                              [25, 50, 75, 100])
        ])))

        if config.nsigma1 is None:
            fit_flag[:, pp, :] = dyn_flg & not_rfi
        else:
            fit_window = config.nsigma1 * np.radians(
                utils.get_window(freq, pol=polstr[pp], dec=src_dec, deg=True))

            win_flg = np.abs(ha)[np.newaxis, :] <= fit_window[:, np.newaxis]

            fit_flag[:, pp, :] = (dyn_flg & win_flg & not_rfi)

    # Calculate base error
    base_err = data['erms'][:, np.newaxis, :]

    # Check for sign flips
    ref_resp = andata.CorrData.from_acq_h5(
        file_list,
        datasets=['evec'],
        input_sel=config.eigen_reference,
        freq_sel=freq_sel,
        start=start,
        stop=stop)['evec'][:, 0:neigen, 0, :]

    sign0 = 1.0 - 2.0 * (ref_resp.real < 0.0)

    # Check that we have the correct reference feed
    if np.any(np.abs(ref_resp.imag) > 0.0):
        raise ValueError("Reference feed %d is incorrect." %
                         config.eigen_reference)

    del ref_resp

    # Save index_map
    results = {}
    results['model'] = model_name
    results['param'] = param_name
    results['freq'] = data.index_map['freq'][:]
    results['input'] = input_axis
    results['eval'] = ha_eval_str
    results['dir'] = direction

    for key, val in model_kwargs:
        results[key] = val

    # Initialize numpy arrays to hold results
    if config.return_response:
        results['response'] = np.zeros((nfreq, ninput, ntime), dtype=np.complex64)
        results['response_err'] = np.zeros((nfreq, ninput, ntime), dtype=np.float32)
        results['fit_flag'] = fit_flag
        results['ha_axis'] = ha
        results['ra'] = ra
    else:
        results['gain_eval'] = np.zeros((nfreq, ninput, ngain), dtype=np.complex64)
        results['weight_eval'] = np.zeros((nfreq, ninput, ngain), dtype=np.float32)
        results['frac_gain_err'] = np.zeros((nfreq, ninput, ngain, ndir), dtype=np.float32)

        results['parameter'] = np.zeros((nfreq, ninput, nparam), dtype=np.float32)
        results['parameter_err'] = np.zeros((nfreq, ninput, nparam), dtype=np.float32)

        results['index_eval'] = np.full((nfreq, ninput), -1, dtype=np.int8)

        results['gain'] = np.zeros((nfreq, ninput), dtype=np.complex64)
        results['weight'] = np.zeros((nfreq, ninput), dtype=np.float32)

        results['ndof'] = np.zeros((nfreq, ninput, ndir), dtype=np.float32)
        results['chisq'] = np.zeros((nfreq, ninput, ndir), dtype=np.float32)

        results['timing'] = np.zeros((nfreq, ninput), dtype=np.complex64)

    # Initialize metric like variables
    results['runtime'] = np.zeros((nblock_freq, 2), dtype=np.float64)

    # Compute distances
    dist = tools.get_feed_positions(inputmap)
    for pp, feeds in enumerate(pol):
        dist[feeds, :] -= dist[phase_ref[pp], np.newaxis, :]

    # Loop over frequency blocks
    for gg in range(nblock_freq):

        mlog.info("Frequency block %d of %d." % (gg, nblock_freq))

        fstart = gg * config.nfreq_per_block
        fstop = min((gg + 1) * config.nfreq_per_block, nfreq)
        findex = np.arange(fstart, fstop)
        ngroup = findex.size

        freq_sel = utils.convert_to_slice(index_freq[findex])

        timeit_start_gg = time.time()

        if config.return_response:
            gstart = start
            gstop = stop
            tslc = slice(0, ntime)
        else:
            good_times = np.flatnonzero(np.any(fit_flag[findex], axis=(0, 1)))
            if good_times.size == 0:
                continue
            gstart = int(np.min(good_times))
            gstop = int(np.max(good_times)) + 1

            tslc = slice(gstart, gstop)

            gstart += start
            gstop += start

        hag = ha[tslc]
        itrans = np.argmin(np.abs(hag))

        # Load eigenvectors.
        nudata = andata.CorrData.from_acq_h5(
            file_list,
            datasets=['evec', 'vis', 'flags/vis_weight'],
            apply_gain=False,
            freq_sel=freq_sel,
            start=gstart,
            stop=gstop)

        # Save time to load data
        results['runtime'][gg, 0] = time.time() - timeit_start_gg
        timeit_start_gg = time.time()

        mlog.info("Time to load (per frequency): %0.3f sec" %
                  (results['runtime'][gg, 0] / ngroup, ))

        # Loop over polarizations
        for pp, feeds in enumerate(pol):

            # Get timing correction
            if tcorr is not None:
                tgain = tcorr.get_gain(nudata.freq, nudata.input[feeds],
                                       nudata.time)
                tgain *= tgain[:, phase_ref_by_pol[pp], np.newaxis, :].conj()

                tgain_transit = tgain[:, :, itrans].copy()
                tgain *= tgain_transit[:, :, np.newaxis].conj()

            # Create the polarization masking vector
            P = np.zeros((1, ninput, 1), dtype=np.float64)
            P[:, feeds, :] = 1.0

            # Loop over frequencies
            for gff, ff in enumerate(findex):

                flg = fit_flag[ff, pp, tslc]

                if (2 * int(np.sum(flg))) < (nparam + 1) and not config.return_response:
                    continue

                # Normalize by eigenvalue and correct for pi phase flips in process.
                resp = (nudata['evec'][gff, 0:neigen, :, :] *
                        np.sqrt(data['eval'][ff, 0:neigen, np.newaxis, tslc]) *
                        sign0[ff, :, np.newaxis, tslc])

                # Rotate to single-pol response
                # Move time to first axis for the matrix multiplication
                invL = tools.invert_no_zero(
                    np.rollaxis(data['eval'][ff, 0:neigen, np.newaxis, tslc],
                                -1, 0))

                UT = np.rollaxis(resp, -1, 0)
                U = np.swapaxes(UT, -1, -2)

                mu, vp = np.linalg.eigh(np.matmul(UT.conj(), P * U))

                rsign0 = (1.0 - 2.0 * (vp[:, 0, np.newaxis, :].real < 0.0))

                resp = mu[:, np.newaxis, :] * np.matmul(U, rsign0 * vp * invL)

                # Extract feeds of this pol
                # Transpose so that time is back to last axis
                resp = resp[:, feeds, -1].T

                # Compute error on response
                dataflg = ((nudata.weight[gff, feeds, :] > 0.0)
                           & np.isfinite(nudata.weight[gff, feeds, :])).astype(np.float32)

                resp_err = (dataflg * base_err[ff, :, tslc] *
                            np.sqrt(nudata.vis[gff, feeds, :].real) *
                            tools.invert_no_zero(np.sqrt(mu[np.newaxis, :, -1])))

                # Reference to specific input
                resp *= np.exp(
                    -1.0J * np.angle(resp[phase_ref_by_pol[pp], np.newaxis, :]))

                # Apply timing correction
                if tcorr is not None:
                    resp *= tgain[gff]

                    results['timing'][ff, feeds] = tgain_transit[gff]

                # Fringestop
                lmbda = scipy.constants.c * 1e-6 / nudata.freq[gff]

                resp *= tools.fringestop_phase(
                    hag[np.newaxis, :], lat, src_dec,
                    dist[feeds, 0, np.newaxis] / lmbda,
                    dist[feeds, 1, np.newaxis] / lmbda)

                # Normalize by source flux
                resp *= inv_rt_flux_density[ff]
                resp_err *= inv_rt_flux_density[ff]

                # If requested, reference phase to the median value
                if config.med_phase_ref:
                    phi0 = np.angle(resp[:, itrans, np.newaxis])
                    resp *= np.exp(-1.0J * phi0)
                    resp *= np.exp(
                        -1.0J * np.median(np.angle(resp), axis=0, keepdims=True))
                    resp *= np.exp(1.0J * phi0)

                # Check if return_response flag was set by user
                if not config.return_response:

                    if config.multi_sample:
                        moving_window = config.nsigma2 and config.nsigma2 * np.radians(
                            utils.get_window(nudata.freq[gff],
                                             pol=polstr[pp],
                                             dec=src_dec,
                                             deg=True))

                    # Loop over inputs
                    for pii, ii in enumerate(feeds):

                        is_good = (flg & (np.abs(resp[pii, :]) > 0.0)
                                   & (resp_err[pii, :] > 0.0))

                        # Set the initial gains based on raw response at transit
                        if is_good[itrans]:
                            results['gain_eval'][ff, ii, 0] = tools.invert_no_zero(
                                resp[pii, itrans])
                            results['frac_gain_err'][ff, ii, 0, :] = (
                                resp_err[pii, itrans] *
                                tools.invert_no_zero(np.abs(resp[pii, itrans])))
                            results['weight_eval'][ff, ii, 0] = 0.5 * (
                                np.abs(resp[pii, itrans])**2 *
                                tools.invert_no_zero(resp_err[pii, itrans]))**2

                            results['index_eval'][ff, ii] = 0
                            results['gain'][ff, ii] = results['gain_eval'][ff, ii, 0]
                            results['weight'][ff, ii] = results['weight_eval'][ff, ii, 0]

                        # Exit if not performing multi time sample fit
                        if not config.multi_sample:
                            continue

                        if (2 * int(np.sum(is_good))) < (nparam + 1):
                            continue

                        try:
                            param, param_err, gain, gain_err, ndof, chisq, tval = fitter_function(
                                hag[is_good],
                                resp[pii, is_good],
                                resp_err[pii, is_good],
                                ha_eval,
                                window=moving_window,
                                tval=tval,
                                **config.fit)
                        except Exception as rex:
                            if config.verbose:
                                mlog.info(
                                    "Frequency %0.2f, Feed %d failed with error: %s"
                                    % (nudata.freq[gff], ii, rex))
                            continue

                        # Check for nan
                        wfit = (np.abs(gain) *
                                tools.invert_no_zero(np.abs(gain_err)))**2
                        if np.any(~np.isfinite(np.abs(gain))) or np.any(~np.isfinite(wfit)):
                            continue

                        # Save to results using the convention that you should
                        # *multiply* the visibilities by the gains
                        results['gain_eval'][ff, ii, fitslc] = tools.invert_no_zero(gain)
                        results['frac_gain_err'][ff, ii, fitslc, 0] = gain_err.real
                        results['frac_gain_err'][ff, ii, fitslc, 1] = gain_err.imag
                        results['weight_eval'][ff, ii, fitslc] = wfit

                        results['parameter'][ff, ii, :] = param
                        results['parameter_err'][ff, ii, :] = param_err

                        results['ndof'][ff, ii, :] = ndof
                        results['chisq'][ff, ii, :] = chisq

                        # Check if the fit was successful and update the gain
                        # evaluation index appropriately
                        if np.all((chisq / ndof.astype(np.float32)) <=
                                  config.chisq_per_dof_threshold):
                            results['index_eval'][ff, ii] = ind_eval
                            results['gain'][ff, ii] = results['gain_eval'][ff, ii, ind_eval]
                            results['weight'][ff, ii] = results['weight_eval'][ff, ii, ind_eval]

                else:
                    # Return response only (do not fit model)
                    results['response'][ff, feeds, :] = resp
                    results['response_err'][ff, feeds, :] = resp_err

        # Save time to fit data
        results['runtime'][gg, 1] = time.time() - timeit_start_gg

        mlog.info("Time to fit (per frequency): %0.3f sec" %
                  (results['runtime'][gg, 1] / ngroup, ))

        # Clean up
        del nudata
        gc.collect()

    # Print total run time
    mlog.info("TOTAL TIME TO LOAD: %0.3f min" %
              (np.sum(results['runtime'][:, 0]) / 60.0, ))
    mlog.info("TOTAL TIME TO FIT: %0.3f min" %
              (np.sum(results['runtime'][:, 1]) / 60.0, ))

    # Set the best estimate of the gain
    if not config.return_response:

        flag = results['index_eval'] >= 0
        gain = results['gain']

        # Compute amplitude
        amp = np.abs(gain)

        # Hard cutoffs on the amplitude
        med_amp = np.median(amp[flag])
        min_amp = med_amp * config.min_amp_scale_factor
        max_amp = med_amp * config.max_amp_scale_factor

        flag &= ((amp >= min_amp) & (amp <= max_amp))

        # Flag outliers in amplitude for each frequency
        for pp, feeds in enumerate(pol):

            med_amp_by_pol = np.zeros(nfreq, dtype=np.float32)
            sig_amp_by_pol = np.zeros(nfreq, dtype=np.float32)

            for ff in range(nfreq):

                this_flag = flag[ff, feeds]

                if np.any(this_flag):
                    med, slow, shigh = utils.estimate_directional_scale(
                        amp[ff, feeds[this_flag]])
                    lower = med - config.nsigma_outlier * slow
                    upper = med + config.nsigma_outlier * shigh

                    flag[ff, feeds] &= ((amp[ff, feeds] >= lower)
                                        & (amp[ff, feeds] <= upper))

                    med_amp_by_pol[ff] = med
                    sig_amp_by_pol[ff] = 0.5 * (shigh - slow) / np.sqrt(
                        np.sum(this_flag, dtype=np.float32))

            if config.nsigma_med_outlier:

                med_flag = med_amp_by_pol > 0.0

                not_outlier = flag_outliers(med_amp_by_pol,
                                            med_flag,
                                            window=config.window_med_outlier,
                                            nsigma=config.nsigma_med_outlier)
                flag[:, feeds] &= not_outlier[:, np.newaxis]

                mlog.info("Pol %s: %d frequencies are outliers." %
                          (polstr[pp],
                           np.sum(~not_outlier & med_flag, dtype=int)))

        # Determine bad frequencies
        flag_freq = (np.sum(flag, axis=1, dtype=np.float32) /
                     float(ninput)) > config.threshold_good_freq
        good_freq = np.flatnonzero(flag_freq)

        # Determine bad inputs
        fraction_good = np.sum(flag[good_freq, :], axis=0,
                               dtype=np.float32) / float(good_freq.size)
        flag_input = fraction_good > config.threshold_good_input

        # Finalize flag
        flag &= (flag_freq[:, np.newaxis] & flag_input[np.newaxis, :])

        # Interpolate gains
        interp_gain, interp_weight = interpolate_gain(
            freq,
            gain,
            results['weight'],
            flag=flag,
            length_scale=config.interpolation_length_scale,
            mlog=mlog)

        # Save gains to object
        results['flag'] = flag
        results['gain'] = interp_gain
        results['weight'] = interp_weight

    # Return results
    return results
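# Minimal numpy sketch of the gain convention noted above: the returned
# `gain` dataset is the inverse of the fitted response, so calibrating a
# visibility matrix means multiplying by g_i * conj(g_j). All values here
# are hypothetical.
def _example_apply_gains():
    g = np.array([1.0 + 0.1j, 0.9 - 0.2j, 1.1 + 0.0j])  # gains for 3 inputs
    vis = np.ones((3, 3), dtype=np.complex64)  # uncalibrated visibilities V_ij
    # "Multiply the visibilities by the gains": V_ij -> g_i V_ij conj(g_j)
    return g[:, np.newaxis] * vis * g[np.newaxis, :].conj()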
def process(self, sstream):
    """Calculate the mean (or median) over the sidereal day.

    Parameters
    ----------
    sstream : andata.CorrData or containers.SiderealStream
        Timestream or sidereal stream.

    Returns
    -------
    mustream : same as sstream
        Sidereal stream containing only the mean (or median) value.
    """
    from .flagging import daytime_flag, transit_flag

    # Make sure we are distributed over frequency
    sstream.redistribute("freq")

    # Extract lsd
    lsd = sstream.attrs["lsd"] if "lsd" in sstream.attrs else sstream.attrs["csd"]
    lsd_list = lsd if hasattr(lsd, "__iter__") else [lsd]

    # Calculate the right ascension, method differs depending on input container
    if "ra" in sstream.index_map:
        ra = sstream.ra
        timestamp = {dd: ephemeris.csd_to_unix(dd + ra / 360.0) for dd in lsd_list}
        flag_quiet = np.ones(ra.size, dtype=bool)
    elif "time" in sstream.index_map:
        ra = ephemeris.lsa(sstream.time)
        timestamp = {lsd: sstream.time}
        flag_quiet = np.fix(ephemeris.unix_to_csd(sstream.time)) == lsd
    else:
        raise RuntimeError("Format of `sstream` argument is unknown.")

    # If requested, determine "quiet" region of sky.
    # In the case of a SiderealStack, there will be multiple LSDs and the
    # mask will be the logical AND of the masks from the individual LSDs.
    if self.mask_day:
        for dd, time_dd in timestamp.items():
            # Mask daytime data
            flag_quiet &= ~daytime_flag(time_dd)

    if self.mask_sources:
        for dd, time_dd in timestamp.items():
            # Mask data near bright source transits
            for body in self.body:
                flag_quiet &= ~transit_flag(body, time_dd, nsigma=self.nsigma)

    if self.mask_ra:
        # Only use data within user specified ranges of RA
        mask_ra = np.zeros(ra.size, dtype=bool)
        for ra_range in self.mask_ra:
            self.log.info("Using data between RA = [%0.2f, %0.2f] deg" %
                          tuple(ra_range))
            mask_ra |= (ra >= ra_range[0]) & (ra <= ra_range[1])
        flag_quiet &= mask_ra

    # Create output container
    newra = np.mean(ra[flag_quiet], keepdims=True)
    mustream = containers.SiderealStream(
        ra=newra,
        axes_from=sstream,
        attrs_from=sstream,
        distributed=True,
        comm=sstream.comm,
    )
    mustream.redistribute("freq")
    mustream.attrs["statistic"] = self._name_of_statistic

    # Dereference visibilities
    all_vis = sstream.vis[:].view(np.ndarray)
    mu_vis = mustream.vis[:].view(np.ndarray)

    # Combine the visibility weights with the quiet flag
    all_weight = sstream.weight[:].view(np.ndarray) * flag_quiet.astype(np.float32)

    if not self.inverse_variance:
        all_weight = (all_weight > 0.0).astype(np.float32)

    # Only include freqs/baselines where enough data is actually present
    frac_present = all_weight.sum(axis=-1) / flag_quiet.sum(axis=-1)
    all_weight *= (frac_present > self.missing_threshold)[..., np.newaxis]

    num_freq_missing_local = int(
        (frac_present < self.missing_threshold).all(axis=1).sum())
    num_freq_missing = self.comm.allreduce(num_freq_missing_local, op=MPI.SUM)

    self.log.info(
        "Cannot estimate a sidereal mean for "
        f"{100.0 * num_freq_missing / len(mustream.freq):.2f}% of all frequencies."
    )

    # Save the total number of nonzero samples as the weight dataset of the
    # output container
    mustream.weight[:] = np.sum(all_weight, axis=-1, keepdims=True)

    # If requested, compute median (requires loop over frequencies and baselines)
    if self.median:
        mu_vis[..., 0].real = weighted_median(all_vis.real.copy(), all_weight)
        mu_vis[..., 0].imag = weighted_median(all_vis.imag.copy(), all_weight)

        # Where all the weights are zero explicitly set the median to zero
        missing = ~(all_weight.any(axis=-1))
        mu_vis[missing, 0] = 0.0
    else:
        # Otherwise calculate the mean
        mu_vis[:] = np.sum(all_weight * all_vis, axis=-1, keepdims=True)
        mu_vis[:] *= tools.invert_no_zero(mustream.weight[:])

    # Return sidereal stream containing the mean value
    return mustream
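# The weighted mean above relies on tools.invert_no_zero, which returns the
# reciprocal with zeros mapped to zero rather than inf. A minimal stand-in
# with the same behaviour, for illustration only (assumes a float array):
def _invert_no_zero_sketch(x):
    # e.g. x = [0., 2., 4.]  ->  [0., 0.5, 0.25]
    inv = np.zeros_like(x)
    np.divide(1.0, x, out=inv, where=(x != 0))
    return inv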
def view(self):
    if self.lsd is None:
        return panel.pane.Markdown("No data selected.")
    try:
        if self.intercylinder_only:
            name = "ringmap_intercyl"
        else:
            name = "ringmap"
        container = self.data.load_file(self.revision, self.lsd, name)
    except DataError as err:
        return panel.pane.Markdown(
            f"Error: {str(err)}. Please report this problem."
        )

    # Index map for ra (x-axis)
    index_map_ra = container.index_map["ra"]
    axis_name_ra = "RA [degrees]"

    # Index map for sin(ZA)/sin(theta) (y-axis)
    index_map_el = container.index_map["el"]
    axis_name_el = "sin(\u03B8)"

    # Apply data selections
    sel_beam = np.where(container.index_map["beam"] == self.beam)[0]
    sel_freq = np.where(
        np.array([f[0] for f in container.index_map["freq"]]) == self.frequency
    )[0]
    if self.polarization == self.mean_pol_text:
        sel_pol = np.where(
            (container.index_map["pol"] == "XX")
            | (container.index_map["pol"] == "YY")
        )[0]
        rmap = np.squeeze(container.map[sel_beam, sel_pol, sel_freq])
        rmap = np.nanmean(rmap, axis=0)
    else:
        sel_pol = np.where(container.index_map["pol"] == self.polarization)[0]
        rmap = np.squeeze(container.map[sel_beam, sel_pol, sel_freq])

    if self.flag_mask:
        rmap = np.where(self._flags_mask(container.index_map["ra"]), np.nan, rmap)

    if self.weight_mask:
        try:
            rms = np.squeeze(container.rms[sel_pol, sel_freq])
        except IndexError:
            logger.error(
                f"rms dataset of ringmap file for rev {self.revision} lsd "
                f"{self.lsd} is missing [{sel_pol}, {sel_freq}] (polarization, "
                f"frequency). rms has shape {container.rms.shape}"
            )
            self.weight_mask = False
        else:
            rmap = np.where(self._weights_mask(rms), np.nan, rmap)

    # Set flagged data to nan
    rmap = np.where(rmap == 0, np.nan, rmap)

    if self.crosstalk_removal:
        # The mean of an all-nan slice (masked?) is nan. We don't need a
        # warning about that.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", r"All-NaN slice encountered")
            rmap -= np.nanmedian(rmap, axis=0)

    if self.template_subtraction:
        try:
            rm_stack = self.data.load_file_from_path(
                self._stack_path, ccontainers.RingMap
            )
        except DataError as err:
            return panel.pane.Markdown(
                f"Error: {str(err)}. Please report this problem."
            )

        # The stack file has all polarizations, so we can't reuse sel_pol
        if self.polarization == self.mean_pol_text:
            stack_sel_pol = np.where(
                (rm_stack.index_map["pol"] == "XX")
                | (rm_stack.index_map["pol"] == "YY")
            )[0]
        else:
            stack_sel_pol = np.where(
                rm_stack.index_map["pol"] == self.polarization
            )[0]

        try:
            rm_stack = np.squeeze(rm_stack.map[sel_beam, stack_sel_pol, sel_freq])
        except IndexError as err:
            logger.error(
                f"map dataset of ringmap stack file "
                f"is missing [{sel_beam}, {stack_sel_pol}, {sel_freq}] (beam, "
                f"polarization, frequency). map has shape {rm_stack.map.shape}:\n{err}"
            )
            self.template_subtraction = False
        else:
            if self.polarization == self.mean_pol_text:
                rm_stack = np.nanmean(rm_stack, axis=0)

            # FIXME: this is a hack. remove when ringmap stack file fixed.
            rmap -= rm_stack.reshape(rm_stack.shape[0], -1, 2).mean(axis=-1)

    if self.transpose:
        rmap = rmap.T
        index_x = index_map_ra
        index_y = index_map_el
        axis_names = [axis_name_ra, axis_name_el]
        xlim, ylim = self.ylim, self.xlim
    else:
        index_x = index_map_el
        index_y = index_map_ra
        axis_names = [axis_name_el, axis_name_ra]
        xlim, ylim = self.xlim, self.ylim

    img = hv.Image(
        (index_x, index_y, rmap),
        datatype=["image", "grid"],
        kdims=axis_names,
    ).opts(
        clim=self.colormap_range,
        logz=self.logarithmic_colorscale,
        cmap=process_cmap("inferno", provider="matplotlib"),
        colorbar=True,
        xlim=xlim,
        ylim=ylim,
    )

    if self.serverside_rendering is not None:
        # set colormap
        cmap_inferno = copy.copy(matplotlib_cm.get_cmap("inferno"))
        cmap_inferno.set_under("black")
        cmap_inferno.set_bad("lightgray")

        # Set z-axis normalization (other possible values are 'eq_hist', 'cbrt').
        if self.logarithmic_colorscale:
            normalization = "log"
        else:
            normalization = "linear"

        # datashade/rasterize the image
        img = self.serverside_rendering(
            img,
            cmap=cmap_inferno,
            precompute=True,
            x_range=xlim,
            y_range=ylim,
            normalization=normalization,
        )

    if self.mark_moon:
        # Put a ring around the location of the moon if it transits on this day
        eph = skyfield_wrapper.ephemeris

        # Start and end times of the CSD
        st = csd_to_unix(self.lsd.lsd)
        et = csd_to_unix(self.lsd.lsd + 1)

        moon_time, moon_dec = chime.transit_times(
            eph["moon"], st, et, return_dec=True
        )

        if len(moon_time):
            lunar_transit = unix_to_csd(moon_time[0])
            lunar_dec = moon_dec[0]
            lunar_ra = (lunar_transit % 1) * 360.0
            lunar_za = np.sin(np.radians(lunar_dec - 49.0))
            if self.transpose:
                img *= hv.Ellipse(lunar_ra, lunar_za, (5.5, 0.15))
            else:
                img *= hv.Ellipse(lunar_za, lunar_ra, (0.04, 21))

    if self.mark_day_time:
        # Calculate the sun rise/set times on this sidereal day

        # Start and end times of the CSD
        start_time = csd_to_unix(self.lsd.lsd)
        end_time = csd_to_unix(self.lsd.lsd + 1)

        times, rises = chime.rise_set_times(
            skyfield_wrapper.ephemeris["sun"],
            start_time,
            end_time,
            diameter=-10,
        )
        sun_rise = 0
        sun_set = 0
        for t, r in zip(times, rises):
            if r:
                sun_rise = (unix_to_csd(t) % 1) * 360
            else:
                sun_set = (unix_to_csd(t) % 1) * 360

        # Highlight the day time data
        opts = {
            "color": "grey",
            "alpha": 0.5,
            "line_width": 1,
            "line_color": "black",
            "line_dash": "dashed",
        }

        if self.transpose:
            if sun_rise < sun_set:
                img *= hv.VSpan(sun_rise, sun_set).opts(**opts)
            else:
                img *= hv.VSpan(self.ylim[0], sun_set).opts(**opts)
                img *= hv.VSpan(sun_rise, self.ylim[1]).opts(**opts)
        else:
            if sun_rise < sun_set:
                img *= hv.HSpan(sun_rise, sun_set).opts(**opts)
            else:
                img *= hv.HSpan(self.ylim[0], sun_set).opts(**opts)
                img *= hv.HSpan(sun_rise, self.ylim[1]).opts(**opts)

    img.opts(
        # Fix height, but make width responsive
        height=self.height,
        responsive=True,
        shared_axes=True,
        bgcolor="lightgray",
    )

    return panel.Row(img, width_policy="max")
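# Small sketch of the plot coordinates used for the overlays above: a transit
# maps to RA through the fractional part of the CSD, and a declination maps to
# the y-axis through sin(ZA), with the ~49 deg latitude taken from the
# `lunar_dec - 49.0` term in the code itself. Values are hypothetical.
def _example_plot_coords(transit_csd=3000.25, dec_deg=22.0, latitude_deg=49.0):
    ra = (transit_csd % 1) * 360.0  # fraction of a sidereal day -> degrees
    sin_za = np.sin(np.radians(dec_deg - latitude_deg))
    return ra, sin_za  # -> (90.0, sin(-27 deg))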
def main(config_file=None, logging_params=DEFAULT_LOGGING):

    # Load config
    config = DEFAULTS.deepcopy()
    if config_file is not None:
        config.merge(NameSpace(load_yaml_config(config_file)))

    # Setup logging
    log.setup_logging(logging_params)
    logger = log.get_logger(__name__)

    ## Load data for flagging

    # Load fpga restarts
    time_fpga_restart = []
    if config.fpga_restart_file is not None:
        with open(config.fpga_restart_file, 'r') as handler:
            for line in handler:
                time_fpga_restart.append(
                    ephemeris.datetime_to_unix(
                        ephemeris.timestr_to_datetime(line.split('_')[0])))
    time_fpga_restart = np.array(time_fpga_restart)

    # Load housekeeping flag
    if config.housekeeping_file is not None:
        ftemp = TempData.from_acq_h5(config.housekeeping_file,
                                     datasets=["time_flag"])
    else:
        ftemp = None

    # Load jump data
    if config.jump_file is not None:
        with h5py.File(config.jump_file, 'r') as handler:
            jump_time = handler["time"][:]
            jump_size = handler["jump_size"][:]
    else:
        jump_time = None
        jump_size = None

    # Load rain data
    if config.rain_file is not None:
        with h5py.File(config.rain_file, 'r') as handler:
            rain_ranges = handler["time_range_conservative"][:]
    else:
        rain_ranges = []

    # Load data flags
    data_flags = {}
    if config.data_flags:
        finder.connect_database()
        flag_types = finder.DataFlagType.select()
        possible_data_flags = []
        for ft in flag_types:
            possible_data_flags.append(ft.name)
            if ft.name in config.data_flags:
                new_data_flags = finder.DataFlag.select().where(
                    finder.DataFlag.type == ft)
                data_flags[ft.name] = list(new_data_flags)

    # Set desired range of time
    start_time = (ephemeris.datetime_to_unix(datetime.datetime(*config.start_date))
                  if config.start_date is not None else None)
    end_time = (ephemeris.datetime_to_unix(datetime.datetime(*config.end_date))
                if config.end_date is not None else None)

    ## Find gain files
    files = {}
    for src in config.sources:
        files[src] = sorted(
            glob.glob(
                os.path.join(config.directory, src.lower(),
                             "%s_%s_lsd_*.h5" % (config.prefix, src.lower()))))

    csd = {}
    for src in config.sources:
        csd[src] = np.array(
            [int(os.path.splitext(ff)[0][-4:]) for ff in files[src]])

    for src in config.sources:
        logger.info("%s: %d files" % (src, len(csd[src])))

    ## Remove files that occur during flag
    csd_flag = {}
    for src in config.sources:

        body = ephemeris.source_dictionary[src]

        csd_flag[src] = np.ones(csd[src].size, dtype=bool)

        for ii, cc in enumerate(csd[src][:]):

            ttrans = ephemeris.transit_times(body, ephemeris.csd_to_unix(cc))[0]

            if (start_time is not None) and (ttrans < start_time):
                csd_flag[src][ii] = False
                continue

            if (end_time is not None) and (ttrans > end_time):
                csd_flag[src][ii] = False
                continue

            # If requested, remove daytime transits
            if not config.include_daytime.get(
                    src, config.include_daytime.default) and daytime_flag(ttrans)[0]:
                logger.info("%s CSD %d: daytime transit" % (src, cc))
                csd_flag[src][ii] = False
                continue

            # Remove transits during HKP drop out
            if ftemp is not None:
                itemp = np.flatnonzero(
                    (ftemp.time[:] >= (ttrans - config.transit_window))
                    & (ftemp.time[:] <= (ttrans + config.transit_window)))
                tempflg = ftemp['time_flag'][itemp]
                if (tempflg.size == 0) or ((np.sum(tempflg, dtype=np.float32) /
                                            float(tempflg.size)) < 0.50):
                    logger.info("%s CSD %d: no housekeeping" % (src, cc))
                    csd_flag[src][ii] = False
                    continue

            # Remove transits near jumps
            if jump_time is not None:
                njump = np.sum((jump_size > config.min_jump_size)
                               & (jump_time > (ttrans - config.jump_window))
                               & (jump_time < ttrans))
                if njump > config.max_njump:
                    logger.info("%s CSD %d: %d jumps before" % (src, cc, njump))
                    csd_flag[src][ii] = False
                    continue
            # Remove transits near rain
            for rng in rain_ranges:
                if (((ttrans - config.transit_window) <= rng[1])
                        and ((ttrans + config.transit_window) >= rng[0])):
                    logger.info("%s CSD %d: during rain" % (src, cc))
                    csd_flag[src][ii] = False
                    break

            # Remove transits during data flag
            for name, flag_list in data_flags.items():
                if csd_flag[src][ii]:
                    for flg in flag_list:
                        if (((ttrans - config.transit_window) <= flg.finish_time)
                                and ((ttrans + config.transit_window) >= flg.start_time)):
                            logger.info("%s CSD %d: %s flag" % (src, cc, name))
                            csd_flag[src][ii] = False
                            break

    # Print number of files left after flagging
    for src in config.sources:
        logger.info("%s: %d files (after flagging)" % (src, np.sum(csd_flag[src])))

    ## Construct pair wise differences
    npair = len(config.diff_pair)
    shift = [nd * 24.0 * 3600.0 for nd in config.nday_shift]

    calmap = []
    calpair = []

    for (tsrc, csrc), sh in zip(config.diff_pair, shift):

        body_test = ephemeris.source_dictionary[tsrc]
        body_cal = ephemeris.source_dictionary[csrc]

        for ii, cc in enumerate(csd[tsrc]):

            if csd_flag[tsrc][ii]:

                test_transit = ephemeris.transit_times(
                    body_test, ephemeris.csd_to_unix(cc))[0]
                cal_transit = ephemeris.transit_times(body_cal,
                                                      test_transit + sh)[0]
                cal_csd = int(np.fix(ephemeris.unix_to_csd(cal_transit)))

                ttrans = np.sort([test_transit, cal_transit])

                if cal_csd in csd[csrc]:
                    jj = list(csd[csrc]).index(cal_csd)

                    if csd_flag[csrc][jj] and not np.any(
                            (time_fpga_restart >= ttrans[0])
                            & (time_fpga_restart <= ttrans[1])):
                        calmap.append([ii, jj])
                        calpair.append([tsrc, csrc])

    calmap = np.array(calmap)
    calpair = np.array(calpair)

    ntransit = calmap.shape[0]

    logger.info("%d total transit pairs" % ntransit)
    for ii in range(ntransit):

        t1 = ephemeris.transit_times(
            ephemeris.source_dictionary[calpair[ii, 0]],
            ephemeris.csd_to_unix(csd[calpair[ii, 0]][calmap[ii, 0]]))[0]
        t2 = ephemeris.transit_times(
            ephemeris.source_dictionary[calpair[ii, 1]],
            ephemeris.csd_to_unix(csd[calpair[ii, 1]][calmap[ii, 1]]))[0]

        logger.info("%s (%d) - %s (%d): %0.1f hr" %
                    (calpair[ii, 0], csd_flag[calpair[ii, 0]][calmap[ii, 0]],
                     calpair[ii, 1], csd_flag[calpair[ii, 1]][calmap[ii, 1]],
                     (t1 - t2) / 3600.0))

    # Determine unique diff pairs
    diff_name = np.array(['%s/%s' % tuple(cp) for cp in calpair])
    uniq_diff, lbl_diff, cnt_diff = np.unique(diff_name,
                                              return_inverse=True,
                                              return_counts=True)
    ndiff = uniq_diff.size

    for ud, udcnt in zip(uniq_diff, cnt_diff):
        logger.info("%s: %d transit pairs" % (ud, udcnt))

    ## Load gains
    inputmap = tools.get_correlator_inputs(datetime.datetime.utcnow(),
                                           correlator='chime')
    ninput = len(inputmap)
    nfreq = 1024

    # Set up gain arrays
    gain = np.zeros((2, nfreq, ninput, ntransit), dtype=np.complex64)
    weight = np.zeros((2, nfreq, ninput, ntransit), dtype=np.float32)
    input_sort = np.zeros((2, ninput, ntransit), dtype=int)

    kcsd = np.zeros((2, ntransit), dtype=np.float32)
    timestamp = np.zeros((2, ntransit), dtype=np.float64)
    is_daytime = np.zeros((2, ntransit), dtype=bool)

    for tt in range(ntransit):

        for kk, (src, ind) in enumerate(zip(calpair[tt], calmap[tt])):

            body = ephemeris.source_dictionary[src]
            filename = files[src][ind]

            logger.info("%s: %s" % (src, filename))

            temp = containers.StaticGainData.from_file(filename)

            freq = temp.freq[:]
            inputs = temp.input[:]

            isort = reorder_inputs(inputmap, inputs)
            inputs = inputs[isort]

            gain[kk, :, :, tt] = temp.gain[:, isort]
            weight[kk, :, :, tt] = temp.weight[:, isort]
            input_sort[kk, :, tt] = isort

            kcsd[kk, tt] = temp.attrs['lsd']
            timestamp[kk, tt] = ephemeris.transit_times(
                body, ephemeris.csd_to_unix(kcsd[kk, tt]))[0]
            is_daytime[kk, tt] = daytime_flag(timestamp[kk, tt])[0]

            if np.any(isort != np.arange(isort.size)):
                logger.info("Input ordering has changed: %s" %
                            ephemeris.unix_to_datetime(
                                timestamp[kk, tt]).strftime("%Y-%m-%d"))

        logger.info("")

    inputs = np.array([(inp.id, inp.input_sn) for inp in inputmap],
                      dtype=[('chan_id', 'u2'), ('correlator_input', 'S32')])

    ## Load input flags
    inpflg = np.ones((2, ninput, ntransit), dtype=bool)

    min_flag_time = np.min(timestamp) - 7.0 * 24.0 * 60.0 * 60.0
    max_flag_time = np.max(timestamp) + 7.0 * 24.0 * 60.0 * 60.0

    flaginput_files = sorted(
        glob.glob(
            os.path.join(config.flaginput_dir, "*" + config.flaginput_suffix,
                         "*.h5")))

    if flaginput_files:
        logger.info("Found %d flaginput files." % len(flaginput_files))
        tmp = andata.FlagInputData.from_acq_h5(flaginput_files, datasets=())
        start, stop = [
            int(yy) for yy in np.percentile(
                np.flatnonzero((tmp.time[:] >= min_flag_time)
                               & (tmp.time[:] <= max_flag_time)), [0, 100])
        ]

        cont = andata.FlagInputData.from_acq_h5(flaginput_files,
                                                start=start,
                                                stop=stop,
                                                datasets=['flag'])

        for kk in range(2):
            inpflg[kk, :, :] = cont.resample('flag', timestamp[kk],
                                             transpose=True)

            logger.info("Flaginput time offsets in minutes (pair %d):" % kk)
            logger.info(
                str(
                    np.fix((cont.time[cont.search_update_time(timestamp[kk])] -
                            timestamp[kk]) / 60.0).astype(int)))

    # Sort flags so they are in same order
    for tt in range(ntransit):
        for kk in range(2):
            inpflg[kk, :, tt] = inpflg[kk, input_sort[kk, :, tt], tt]

    # Do not apply input flag to phase reference
    for ii in config.index_phase_ref:
        inpflg[:, ii, :] = True

    ## Flag out gains with high uncertainty and frequencies with large
    ## fraction of data flagged
    frac_err = tools.invert_no_zero(np.sqrt(weight) * np.abs(gain))

    flag = np.all((weight > 0.0) & (np.abs(gain) > 0.0)
                  & (frac_err < config.max_uncertainty), axis=0)

    freq_flag = ((np.sum(flag, axis=(1, 2), dtype=np.float32) /
                  float(np.prod(flag.shape[1:]))) > config.freq_threshold)

    if config.apply_rfi_mask:
        freq_flag &= np.logical_not(rfi.frequency_mask(freq))

    flag = flag & freq_flag[:, np.newaxis, np.newaxis]

    good_freq = np.flatnonzero(freq_flag)

    logger.info("Number good frequencies %d" % good_freq.size)

    ## Generate flags with more conservative cuts on frequency
    c_flag = flag & np.all(frac_err < config.conservative.max_uncertainty,
                           axis=0)

    c_freq_flag = ((np.sum(c_flag, axis=(1, 2), dtype=np.float32) /
                    float(np.prod(c_flag.shape[1:]))) >
                   config.conservative.freq_threshold)

    if config.conservative.apply_rfi_mask:
        c_freq_flag &= np.logical_not(rfi.frequency_mask(freq))

    c_flag = c_flag & c_freq_flag[:, np.newaxis, np.newaxis]

    c_good_freq = np.flatnonzero(c_freq_flag)

    logger.info("Number good frequencies (conservative thresholds) %d" %
                c_good_freq.size)

    ## Apply input flags
    flag &= np.all(inpflg[:, np.newaxis, :, :], axis=0)

    ## Update flags based on beam flag
    if config.beam_flag_file is not None:

        dbeam = andata.BaseData.from_acq_h5(config.beam_flag_file)

        db_csd = np.floor(ephemeris.unix_to_csd(dbeam.index_map['time'][:]))

        for ii, name in enumerate(config.beam_flag_datasets):
            logger.info("Applying %s beam flag." % name)
            if not ii:
                db_flag = dbeam.flags[name][:]
            else:
                db_flag &= dbeam.flags[name][:]

        cnt = 0
        for ii, dbc in enumerate(db_csd):
            this_csd = np.flatnonzero(np.any(kcsd == dbc, axis=0))
            if this_csd.size > 0:
                logger.info("Beam flag for %d matches %s." %
                            (dbc, str(kcsd[:, this_csd])))
                flag[:, :, this_csd] &= db_flag[np.newaxis, :, ii, np.newaxis]
                cnt += 1

        logger.info("Applied %0.1f percent of the beam flags" %
                    (100.0 * cnt / float(db_csd.size), ))

    ## Flag inputs with large amount of missing data
    input_frac_flagged = (
        np.sum(flag[good_freq, :, :], axis=(0, 2), dtype=np.float32) /
        float(good_freq.size * ntransit))
    input_flag = input_frac_flagged > config.input_threshold

    for ii in config.index_phase_ref:
        logger.info("Phase reference %d has %0.3f fraction of data flagged." %
                    (ii, input_frac_flagged[ii]))
        input_flag[ii] = True

    good_input = np.flatnonzero(input_flag)
    flag = flag & input_flag[np.newaxis, :, np.newaxis]

    logger.info("Number good inputs %d" % good_input.size)

    ## Calibrate
    gaincal = gain[0] * tools.invert_no_zero(gain[1])

    frac_err_cal = np.sqrt(frac_err[0]**2 + frac_err[1]**2)

    count = np.sum(flag, axis=-1, dtype=int)
    stat_flag = count > config.min_num_transit

    ## Calculate phase
    amp = np.abs(gaincal)
    phi = np.angle(gaincal)

    ## Calculate polarisation groups
    pol_dict = {'E': 'X', 'S': 'Y'}
    cyl_dict = {2: 'A', 3: 'B', 4: 'C', 5: 'D'}

    if config.group_by_cyl:
        group_id = [
            (inp.pol, inp.cyl) if tools.is_chime(inp) and (ii in good_input) else None
            for ii, inp in enumerate(inputmap)
        ]
    else:
        group_id = [
            inp.pol if tools.is_chime(inp) and (ii in good_input) else None
            for ii, inp in enumerate(inputmap)
        ]

    ugroup_id = sorted([uidd for uidd in set(group_id) if uidd is not None])
    ngroup = len(ugroup_id)

    group_list_noref = [
        np.array([
            gg for gg, gid in enumerate(group_id)
            if (gid == ugid) and gg not in config.index_phase_ref
        ]) for ugid in ugroup_id
    ]

    group_list = [
        np.array([gg for gg, gid in enumerate(group_id) if gid == ugid])
        for ugid in ugroup_id
    ]

    if config.group_by_cyl:
        group_str = [
            "%s-%s" % (pol_dict[pol], cyl_dict[cyl]) for pol, cyl in ugroup_id
        ]
    else:
        group_str = [pol_dict[pol] for pol in ugroup_id]

    index_phase_ref = []
    for gstr, igroup in zip(group_str, group_list):
        candidate = [ii for ii in config.index_phase_ref if ii in igroup]
        if len(candidate) != 1:
            index_phase_ref.append(None)
        else:
            index_phase_ref.append(candidate[0])

    logger.info(
        "Phase reference: %s" %
        ', '.join(['%s = %s' % tpl for tpl in zip(group_str, index_phase_ref)]))

    ## Apply thermal correction to amplitude
    if config.amp_thermal.enabled:

        logger.info("Applying thermal correction.")

        # Load the temperatures
        tdata = TempData.from_acq_h5(config.amp_thermal.filename)

        index = tdata.search_sensors(config.amp_thermal.sensor)[0]

        temp = tdata.datasets[config.amp_thermal.field][index]
        temp_func = scipy.interpolate.interp1d(tdata.time, temp,
                                               **config.amp_thermal.interp)

        itemp = temp_func(timestamp)
        dtemp = itemp[0] - itemp[1]

        flag_func = scipy.interpolate.interp1d(
            tdata.time, tdata.datasets['flag'][index].astype(np.float32),
            **config.amp_thermal.interp)

        dtemp_flag = np.all(flag_func(timestamp) == 1.0, axis=0)

        flag &= dtemp_flag[np.newaxis, np.newaxis, :]

        for gstr, igroup in zip(group_str, group_list):
            pstr = gstr[0]
            thermal_coeff = np.polyval(config.amp_thermal.coeff[pstr], freq)
            gthermal = 1.0 + (thermal_coeff[:, np.newaxis, np.newaxis] *
                              dtemp[np.newaxis, np.newaxis, :])

            amp[:, igroup, :] *= tools.invert_no_zero(gthermal)

    ## Compute common mode
    if config.subtract_common_mode_before:
        logger.info("Calculating common mode amplitude and phase.")
        cmn_amp, flag_cmn_amp = compute_common_mode(amp, flag,
                                                    group_list_noref,
                                                    median=False)
        cmn_phi, flag_cmn_phi = compute_common_mode(phi, flag,
                                                    group_list_noref,
                                                    median=False)

        # Subtract common mode (from phase only)
logger.info("Subtracting common mode phase.") group_flag = np.zeros((ngroup, ninput), dtype=np.bool) for gg, igroup in enumerate(group_list): group_flag[gg, igroup] = True phi[:, igroup, :] = phi[:, igroup, :] - cmn_phi[:, gg, np.newaxis, :] for iref in index_phase_ref: if (iref is not None) and (iref in igroup): flag[:, iref, :] = flag_cmn_phi[:, gg, :] ## If requested, determine and subtract a delay template if config.fit_delay_before: logger.info("Fitting delay template.") omega = timing.FREQ_TO_OMEGA * freq tau, tau_flag, _ = construct_delay_template( omega, phi, c_flag & flag, min_num_freq_for_delay_fit=config.min_num_freq_for_delay_fit) # Compute residuals logger.info("Subtracting delay template.") phi = phi - tau[np.newaxis, :, :] * omega[:, np.newaxis, np.newaxis] ## Normalize by median over time logger.info("Calculating median amplitude and phase.") med_amp = np.zeros((nfreq, ninput, ndiff), dtype=amp.dtype) med_phi = np.zeros((nfreq, ninput, ndiff), dtype=phi.dtype) count_by_diff = np.zeros((nfreq, ninput, ndiff), dtype=np.int) stat_flag_by_diff = np.zeros((nfreq, ninput, ndiff), dtype=np.bool) def weighted_mean(yy, ww, axis=-1): return np.sum(ww * yy, axis=axis) * tools.invert_no_zero( np.sum(ww, axis=axis)) for dd in range(ndiff): this_diff = np.flatnonzero(lbl_diff == dd) this_flag = flag[:, :, this_diff] this_amp = amp[:, :, this_diff] this_amp_err = this_amp * frac_err_cal[:, :, this_diff] * this_flag.astype( np.float32) this_phi = phi[:, :, this_diff] this_phi_err = frac_err_cal[:, :, this_diff] * this_flag.astype( np.float32) count_by_diff[:, :, dd] = np.sum(this_flag, axis=-1, dtype=np.int) stat_flag_by_diff[:, :, dd] = count_by_diff[:, :, dd] > config.min_num_transit if config.weighted_mean == 2: logger.info("Calculating inverse variance weighted mean.") med_amp[:, :, dd] = weighted_mean(this_amp, tools.invert_no_zero(this_amp_err**2), axis=-1) med_phi[:, :, dd] = weighted_mean(this_phi, tools.invert_no_zero(this_phi_err**2), axis=-1) elif config.weighted_mean == 1: logger.info("Calculating uniform weighted mean.") med_amp[:, :, dd] = weighted_mean(this_amp, this_flag.astype(np.float32), axis=-1) med_phi[:, :, dd] = weighted_mean(this_phi, this_flag.astype(np.float32), axis=-1) else: logger.info("Calculating median value.") for ff in range(nfreq): for ii in range(ninput): if np.any(this_flag[ff, ii, :]): med_amp[ff, ii, dd] = wq.median( this_amp[ff, ii, :], this_flag[ff, ii, :].astype(np.float32)) med_phi[ff, ii, dd] = wq.median( this_phi[ff, ii, :], this_flag[ff, ii, :].astype(np.float32)) damp = np.zeros_like(amp) dphi = np.zeros_like(phi) for dd in range(ndiff): this_diff = np.flatnonzero(lbl_diff == dd) damp[:, :, this_diff] = amp[:, :, this_diff] * tools.invert_no_zero( med_amp[:, :, dd, np.newaxis]) - 1.0 dphi[:, :, this_diff] = phi[:, :, this_diff] - med_phi[:, :, dd, np.newaxis] # Compute common mode if not config.subtract_common_mode_before: logger.info("Calculating common mode amplitude and phase.") cmn_amp, flag_cmn_amp = compute_common_mode(damp, flag, group_list_noref, median=True) cmn_phi, flag_cmn_phi = compute_common_mode(dphi, flag, group_list_noref, median=True) # Subtract common mode (from phase only) logger.info("Subtracting common mode phase.") group_flag = np.zeros((ngroup, ninput), dtype=np.bool) for gg, igroup in enumerate(group_list): group_flag[gg, igroup] = True dphi[:, igroup, :] = dphi[:, igroup, :] - cmn_phi[:, gg, np.newaxis, :] for iref in index_phase_ref: if (iref is not None) and (iref in igroup): flag[:, iref, :] = flag_cmn_phi[:, gg, 
    ## Compute RMS
    logger.info("Calculating RMS of amplitude and phase.")
    mad_amp = np.zeros((nfreq, ninput), dtype=amp.dtype)
    std_amp = np.zeros((nfreq, ninput), dtype=amp.dtype)

    mad_phi = np.zeros((nfreq, ninput), dtype=phi.dtype)
    std_phi = np.zeros((nfreq, ninput), dtype=phi.dtype)

    mad_amp_by_diff = np.zeros((nfreq, ninput, ndiff), dtype=amp.dtype)
    std_amp_by_diff = np.zeros((nfreq, ninput, ndiff), dtype=amp.dtype)

    mad_phi_by_diff = np.zeros((nfreq, ninput, ndiff), dtype=phi.dtype)
    std_phi_by_diff = np.zeros((nfreq, ninput, ndiff), dtype=phi.dtype)

    for ff in range(nfreq):
        for ii in range(ninput):
            this_flag = flag[ff, ii, :]
            if np.any(this_flag):
                std_amp[ff, ii] = np.std(damp[ff, ii, this_flag])
                std_phi[ff, ii] = np.std(dphi[ff, ii, this_flag])

                mad_amp[ff, ii] = 1.48625 * wq.median(
                    np.abs(damp[ff, ii, :]), this_flag.astype(np.float32))
                mad_phi[ff, ii] = 1.48625 * wq.median(
                    np.abs(dphi[ff, ii, :]), this_flag.astype(np.float32))

                for dd in range(ndiff):
                    this_diff = this_flag & (lbl_diff == dd)
                    if np.any(this_diff):
                        std_amp_by_diff[ff, ii, dd] = np.std(damp[ff, ii, this_diff])
                        std_phi_by_diff[ff, ii, dd] = np.std(dphi[ff, ii, this_diff])

                        mad_amp_by_diff[ff, ii, dd] = 1.48625 * wq.median(
                            np.abs(damp[ff, ii, :]), this_diff.astype(np.float32))
                        mad_phi_by_diff[ff, ii, dd] = 1.48625 * wq.median(
                            np.abs(dphi[ff, ii, :]), this_diff.astype(np.float32))

    ## Construct delay template
    if not config.fit_delay_before:
        logger.info("Fitting delay template.")
        omega = timing.FREQ_TO_OMEGA * freq

        tau, tau_flag, _ = construct_delay_template(
            omega, dphi, c_flag & flag,
            min_num_freq_for_delay_fit=config.min_num_freq_for_delay_fit)

        # Compute residuals
        logger.info("Subtracting delay template from phase.")
        resid = (dphi - tau[np.newaxis, :, :] *
                 omega[:, np.newaxis, np.newaxis]) * flag.astype(np.float32)
    else:
        resid = dphi

    tau_count = np.sum(tau_flag, axis=-1, dtype=int)
    tau_stat_flag = tau_count > config.min_num_transit

    tau_count_by_diff = np.zeros((ninput, ndiff), dtype=int)
    tau_stat_flag_by_diff = np.zeros((ninput, ndiff), dtype=bool)
    for dd in range(ndiff):
        this_diff = np.flatnonzero(lbl_diff == dd)
        tau_count_by_diff[:, dd] = np.sum(tau_flag[:, this_diff],
                                          axis=-1, dtype=int)
        tau_stat_flag_by_diff[:, dd] = tau_count_by_diff[:, dd] > config.min_num_transit

    ## Calculate statistics of residuals
    std_resid = np.zeros((nfreq, ninput), dtype=phi.dtype)
    mad_resid = np.zeros((nfreq, ninput), dtype=phi.dtype)

    std_resid_by_diff = np.zeros((nfreq, ninput, ndiff), dtype=phi.dtype)
    mad_resid_by_diff = np.zeros((nfreq, ninput, ndiff), dtype=phi.dtype)

    for ff in range(nfreq):
        for ii in range(ninput):
            this_flag = flag[ff, ii, :]
            if np.any(this_flag):
                std_resid[ff, ii] = np.std(resid[ff, ii, this_flag])
                mad_resid[ff, ii] = 1.48625 * wq.median(
                    np.abs(resid[ff, ii, :]), this_flag.astype(np.float32))

                for dd in range(ndiff):
                    this_diff = this_flag & (lbl_diff == dd)
                    if np.any(this_diff):
                        std_resid_by_diff[ff, ii, dd] = np.std(resid[ff, ii, this_diff])
                        mad_resid_by_diff[ff, ii, dd] = 1.48625 * wq.median(
                            np.abs(resid[ff, ii, :]), this_diff.astype(np.float32))

    ## Calculate statistics of delay template
    mad_tau = np.zeros((ninput, ), dtype=phi.dtype)
    std_tau = np.zeros((ninput, ), dtype=phi.dtype)

    mad_tau_by_diff = np.zeros((ninput, ndiff), dtype=phi.dtype)
    std_tau_by_diff = np.zeros((ninput, ndiff), dtype=phi.dtype)

    for ii in range(ninput):
        this_flag = tau_flag[ii]
        if np.any(this_flag):
            std_tau[ii] = np.std(tau[ii, this_flag])
            mad_tau[ii] = 1.48625 * wq.median(np.abs(tau[ii]),
                                              this_flag.astype(np.float32))

            for dd in range(ndiff):
                this_diff = this_flag & (lbl_diff == dd)
                if np.any(this_diff):
                    std_tau_by_diff[ii, dd] = np.std(tau[ii, this_diff])
                    mad_tau_by_diff[ii, dd] = 1.48625 * wq.median(
                        np.abs(tau[ii]), this_diff.astype(np.float32))

    ## Define output
    res = {
        "timestamp": {"data": timestamp, "axis": ["div", "time"]},
        "is_daytime": {"data": is_daytime, "axis": ["div", "time"]},
        "csd": {"data": kcsd, "axis": ["div", "time"]},
        "pair_map": {"data": lbl_diff, "axis": ["time"]},
        "pair_count": {"data": cnt_diff, "axis": ["pair"]},
        "gain": {"data": gaincal, "axis": ["freq", "input", "time"]},
        "frac_err": {"data": frac_err_cal, "axis": ["freq", "input", "time"]},
        "flags/gain": {"data": flag, "axis": ["freq", "input", "time"], "flag": True},
        "flags/gain_conservative": {"data": c_flag, "axis": ["freq", "input", "time"], "flag": True},
        "flags/count": {"data": count, "axis": ["freq", "input"], "flag": True},
        "flags/stat": {"data": stat_flag, "axis": ["freq", "input"], "flag": True},
        "flags/count_by_pair": {"data": count_by_diff, "axis": ["freq", "input", "pair"], "flag": True},
        "flags/stat_by_pair": {"data": stat_flag_by_diff, "axis": ["freq", "input", "pair"], "flag": True},
        "med_amp": {"data": med_amp, "axis": ["freq", "input", "pair"]},
        "med_phi": {"data": med_phi, "axis": ["freq", "input", "pair"]},
        "flags/group_flag": {"data": group_flag, "axis": ["group", "input"], "flag": True},
        "cmn_amp": {"data": cmn_amp, "axis": ["freq", "group", "time"]},
        "cmn_phi": {"data": cmn_phi, "axis": ["freq", "group", "time"]},
        "amp": {"data": damp, "axis": ["freq", "input", "time"]},
        "phi": {"data": dphi, "axis": ["freq", "input", "time"]},
        "std_amp": {"data": std_amp, "axis": ["freq", "input"]},
        "std_amp_by_pair": {"data": std_amp_by_diff, "axis": ["freq", "input", "pair"]},
        "mad_amp": {"data": mad_amp, "axis": ["freq", "input"]},
        "mad_amp_by_pair": {"data": mad_amp_by_diff, "axis": ["freq", "input", "pair"]},
        "std_phi": {"data": std_phi, "axis": ["freq", "input"]},
        "std_phi_by_pair": {"data": std_phi_by_diff, "axis": ["freq", "input", "pair"]},
        "mad_phi": {"data": mad_phi, "axis": ["freq", "input"]},
        "mad_phi_by_pair": {"data": mad_phi_by_diff, "axis": ["freq", "input", "pair"]},
        "tau": {"data": tau, "axis": ["input", "time"]},
        "flags/tau": {"data": tau_flag, "axis": ["input", "time"], "flag": True},
        "flags/tau_count": {"data": tau_count, "axis": ["input"], "flag": True},
        "flags/tau_stat": {"data": tau_stat_flag, "axis": ["input"], "flag": True},
        "flags/tau_count_by_pair": {"data": tau_count_by_diff, "axis": ["input", "pair"], "flag": True},
        "flags/tau_stat_by_pair": {"data": tau_stat_flag_by_diff, "axis": ["input", "pair"], "flag": True},
        "std_tau": {"data": std_tau, "axis": ["input"]},
        "std_tau_by_pair": {"data": std_tau_by_diff, "axis": ["input", "pair"]},
        "mad_tau": {"data": mad_tau, "axis": ["input"]},
        "mad_tau_by_pair": {"data": mad_tau_by_diff, "axis": ["input", "pair"]},
        "resid_phi": {"data": resid, "axis": ["freq", "input", "time"]},
        "std_resid_phi": {"data": std_resid, "axis": ["freq", "input"]},
        "std_resid_phi_by_pair": {"data": std_resid_by_diff, "axis": ["freq", "input", "pair"]},
        "mad_resid_phi": {"data": mad_resid, "axis": ["freq", "input"]},
        "mad_resid_phi_by_pair": {"data": mad_resid_by_diff, "axis": ["freq", "input", "pair"]},
    }

    ## Create the output container
    logger.info("Creating StabilityData container.")
    data = StabilityData()

    data.create_index_map(
        "div", np.array(["numerator", "denominator"], dtype=np.string_))
data.create_index_map("pair", np.array(uniq_diff, dtype=np.string_)) data.create_index_map("group", np.array(group_str, dtype=np.string_)) data.create_index_map("freq", freq) data.create_index_map("input", inputs) data.create_index_map("time", timestamp[0, :]) logger.info("Writing datsets to container.") for name, dct in res.iteritems(): is_flag = dct.get('flag', False) if is_flag: dset = data.create_flag(name.split('/')[-1], data=dct['data']) else: dset = data.create_dataset(name, data=dct['data']) dset.attrs['axis'] = np.array(dct['axis'], dtype=np.string_) data.attrs['phase_ref'] = np.array( [iref for iref in index_phase_ref if iref is not None]) # Determine the output filename and save results start_time, end_time = ephemeris.unix_to_datetime( np.percentile(timestamp, [0, 100])) tfmt = "%Y%m%d" night_str = 'night_' if not np.any(is_daytime) else '' output_file = os.path.join( config.output_dir, "%s_%s_%sraw_stability_data.h5" % (start_time.strftime(tfmt), end_time.strftime(tfmt), night_str)) logger.info("Saving results to %s." % output_file) data.save(output_file)
def view(self):
    if self.lsd is None:
        return panel.pane.Markdown("No data selected.")
    try:
        sens_container = self.data.load_file(self.revision, self.lsd,
                                             "sensitivity")
    except DataError as err:
        return panel.pane.Markdown(
            f"Error: {str(err)}. Please report this problem.")

    # Index map for ra (x-axis)
    sens_csd = csd(sens_container.time)
    index_map_ra = (sens_csd - self.lsd.lsd) * 360
    axis_name_ra = "RA [degrees]"

    # Index map for frequency (y-axis)
    index_map_f = np.linspace(800.0, 400.0, 1024, endpoint=False)
    axis_name_f = "Frequency [MHz]"

    # Apply data selections
    if self.polarization == self.mean_pol_text:
        sel_pol = np.where((sens_container.index_map["pol"] == "XX")
                           | (sens_container.index_map["pol"] == "YY"))[0]
        sens = np.squeeze(sens_container.measured[:, sel_pol])
        sens = np.squeeze(np.nanmean(sens, axis=1))
    else:
        sel_pol = np.where(
            sens_container.index_map["pol"] == self.polarization)[0]
        sens = np.squeeze(sens_container.measured[:, sel_pol])

    if self.flag_mask:
        sens = np.where(self._flags_mask(index_map_ra).T, np.nan, sens)

    # Set flagged data to nan
    sens = np.where(sens == 0, np.nan, sens)

    if self.mask_rfi:
        try:
            rfi_container = self.data.load_file(self.revision, self.lsd, "rfi")
        except DataError as err:
            return panel.pane.Markdown(
                f"Error: {str(err)}. Please report this problem.")

        rfi = np.squeeze(rfi_container.mask[:])

        # calculate percentage masked to print later
        rfi_percentage = round(np.count_nonzero(rfi) / rfi.size * 100)
        sens *= np.where(rfi, np.nan, 1)

    if self.divide_by_estimate:
        estimate = np.squeeze(sens_container.radiometer[:, sel_pol])
        if self.polarization == self.mean_pol_text:
            estimate = np.squeeze(np.nanmean(estimate, axis=1))
        estimate = np.where(estimate == 0, np.nan, estimate)
        sens = sens / estimate

    if self.transpose:
        sens = sens.T
        index_x = index_map_f
        index_y = index_map_ra
        axis_names = [axis_name_f, axis_name_ra]
        xlim, ylim = self.ylim, self.xlim
    else:
        index_x = index_map_ra
        index_y = index_map_f
        axis_names = [axis_name_ra, axis_name_f]
        xlim, ylim = self.xlim, self.ylim

    image_opts = {
        "clim": self.colormap_range,
        "logz": self.logarithmic_colorscale,
        "cmap": process_cmap("viridis", provider="matplotlib"),
        "colorbar": True,
        "xticks": [0, 60, 120, 180, 240, 300, 360],
    }
    if self.mask_rfi:
        image_opts["title"] = f"RFI mask: {rfi_percentage}%"

    overlay_opts = {
        "xlim": xlim,
        "ylim": ylim,
    }

    # Fill in missing data
    img = hv_image_with_gaps(index_x, index_y, sens,
                             opts=image_opts,
                             kdims=axis_names).opts(**overlay_opts)

    if self.serverside_rendering is not None:
        # set colormap
        cmap_viridis = copy.copy(matplotlib_cm.get_cmap("viridis"))

        # Set z-axis normalization (other possible values are 'eq_hist', 'cbrt').
        if self.logarithmic_colorscale:
            normalization = "log"
        else:
            normalization = "linear"

        # datashade/rasterize the image
        img = self.serverside_rendering(
            img,
            cmap=cmap_viridis,
            precompute=True,
            x_range=xlim,
            y_range=ylim,
            normalization=normalization,
            # TODO: set xticks like above
        )

    if self.mark_day_time:
        # Calculate the sun rise/set times on this sidereal day

        # Start and end times of the CSD
        start_time = csd_to_unix(self.lsd.lsd)
        end_time = csd_to_unix(self.lsd.lsd + 1)

        times, rises = chime.rise_set_times(
            skyfield_wrapper.ephemeris["sun"],
            start_time,
            end_time,
            diameter=-10,
        )
        sun_rise = 0
        sun_set = 0
        for t, r in zip(times, rises):
            if r:
                sun_rise = (unix_to_csd(t) % 1) * 360
            else:
                sun_set = (unix_to_csd(t) % 1) * 360

        # Highlight the day time data
        opts = {
            "color": "grey",
            "alpha": 0.5,
            "line_width": 1,
            "line_color": "black",
            "line_dash": "dashed",
        }

        span = hv.HSpan if self.transpose else hv.VSpan
        if sun_rise < sun_set:
            img *= span(sun_rise, sun_set).opts(**opts)
        else:
            img *= span(self.xlim[0], sun_set).opts(**opts)
            img *= span(sun_rise, self.xlim[-1]).opts(**opts)

    img.opts(
        # Fix height, but make width responsive
        height=self.height,
        responsive=True,
        bgcolor="lightgray",
        shared_axes=True,
    )

    return panel.Row(img, width_policy="max")