def save_callback(event):
    # Save pointing model to file
    outfile = open(opts.outfilebase + '.csv', 'w')
    # The original pointing model description string was comma-separated
    outfile.write(new_model.description.replace(" ", ", "))
    outfile.close()
    logger.debug("Saved %d-parameter pointing model to '%s'" % (len(new_model), opts.outfilebase + '.csv'))
    # Turn data recarray into list of dicts and add residuals to the mix
    extended_data = []
    for n in range(len(data)):
        rec_dict = dict(zip(data.dtype.names, data[n]))
        rec_dict['keep'] = int(keep[n])
        rec_dict['old_residual_xel'] = rad2deg(old.residual_xel[n])
        rec_dict['old_residual_el'] = rad2deg(old.residual_el[n])
        rec_dict['new_residual_xel'] = rad2deg(new.residual_xel[n])
        rec_dict['new_residual_el'] = rad2deg(new.residual_el[n])
        extended_data.append(rec_dict)
    # Format the data similar to analyse_point_source_scans output CSV file, with four new columns at the end
    fields = '%(dataset)s, %(target)s, %(timestamp_ut)s, %(azimuth).7f, %(elevation).7f, ' \
             '%(delta_azimuth).7f, %(delta_azimuth_std).7f, %(delta_elevation).7f, %(delta_elevation_std).7f, ' \
             '%(data_unit)s, %(beam_height_I).7f, %(beam_height_I_std).7f, %(beam_width_I).7f, ' \
             '%(beam_width_I_std).7f, %(baseline_height_I).7f, %(baseline_height_I_std).7f, %(refined_I).0f, ' \
             '%(beam_height_HH).7f, %(beam_width_HH).7f, %(baseline_height_HH).7f, %(refined_HH).0f, ' \
             '%(beam_height_VV).7f, %(beam_width_VV).7f, %(baseline_height_VV).7f, %(refined_VV).0f, ' \
             '%(frequency).7f, %(flux).4f, %(temperature).2f, %(pressure).2f, %(humidity).2f, %(wind_speed).2f, ' \
             '%(keep)d, %(old_residual_xel).7f, %(old_residual_el).7f, %(new_residual_xel).7f, %(new_residual_el).7f\n'
    field_names = [name.partition(')')[0] for name in fields[2:].split(', %(')]
    # Save residual data and flags to file
    outfile2 = open(opts.outfilebase + '_data.csv', 'w')
    outfile2.write('# antenna = %s\n' % antenna.description)
    outfile2.write(', '.join(field_names) + '\n')
    outfile2.writelines([fields % rec for rec in extended_data])
    outfile2.close()
    save_button.color = '0.85'
    save_button.hovercolor = '0.95'
def _extract_location_from_katdata(self):
    self.metadata["DecRa"] = []
    self.metadata["ElAz"] = []
    f = self._katdata
    f.select(scans="track,scan")
    f.select(ants=f.ref_ant)
    for i, scan, target in f.scans():
        f.select(scans=i)
        t = f.catalogue.targets[f.target_indices[0]]
        if t.body_type == 'radec':
            ra, dec = t.radec()
            ra, dec = katpoint.rad2deg(ra), katpoint.rad2deg(dec)
            self.metadata["DecRa"].append("%f, %f" % (dec, katpoint.wrap_angle(ra, 360)))
        elif t.body_type == 'azel':
            az, el = t.azel()
            az, el = katpoint.rad2deg(az), katpoint.rad2deg(el)
            if -90 <= el <= 90:
                self.metadata["ElAz"].append("%f, %f" % (el, katpoint.wrap_angle(az, 360)))
            else:
                self.metadata["ElAz"].append("%f, %f" % (np.clip(el, -90, 90), katpoint.wrap_angle(az, 360)))
def save_callback(event):
    # Save pointing model to file
    outfile = open(opts.outfilebase + '.csv', 'w')
    outfile.write(new_model.description)
    outfile.close()
    logger.debug("Saved %d-parameter pointing model to '%s'" % (len(new_model.params), opts.outfilebase + '.csv'))
    # Turn data recarray into list of dicts and add residuals to the mix
    extended_data = []
    for n in range(len(data)):
        rec_dict = dict(zip(data.dtype.names, data[n]))
        rec_dict['keep'] = int(keep[n])
        rec_dict['old_residual_xel'] = rad2deg(old.residual_xel[n])
        rec_dict['old_residual_el'] = rad2deg(old.residual_el[n])
        rec_dict['new_residual_xel'] = rad2deg(new.residual_xel[n])
        rec_dict['new_residual_el'] = rad2deg(new.residual_el[n])
        extended_data.append(rec_dict)
    # Format the data similar to analyse_point_source_scans output CSV file, with four new columns at the end
    fields = '%(dataset)s, %(target)s, %(timestamp_ut)s, %(azimuth).7f, %(elevation).7f, ' \
             '%(delta_azimuth).7f, %(delta_azimuth_std).7f, %(delta_elevation).7f, %(delta_elevation_std).7f, ' \
             '%(data_unit)s, %(beam_height_I).7f, %(beam_height_I_std).7f, %(beam_width_I).7f, ' \
             '%(beam_width_I_std).7f, %(baseline_height_I).7f, %(baseline_height_I_std).7f, %(refined_I).0f, ' \
             '%(beam_height_HH).7f, %(beam_width_HH).7f, %(baseline_height_HH).7f, %(refined_HH).0f, ' \
             '%(beam_height_VV).7f, %(beam_width_VV).7f, %(baseline_height_VV).7f, %(refined_VV).0f, ' \
             '%(frequency).7f, %(flux).4f, %(temperature).2f, %(pressure).2f, %(humidity).2f, %(wind_speed).2f, ' \
             '%(keep)d, %(old_residual_xel).7f, %(old_residual_el).7f, %(new_residual_xel).7f, %(new_residual_el).7f\n'
    field_names = [name.partition(')')[0] for name in fields[2:].split(', %(')]
    # Save residual data and flags to file
    outfile2 = open(opts.outfilebase + '_data.csv', 'w')
    outfile2.write('# antenna = %s\n' % antenna.description)
    outfile2.write(', '.join(field_names) + '\n')
    outfile2.writelines([fields % rec for rec in extended_data])
    outfile2.close()
    save_button.color = '0.85'
    save_button.hovercolor = '0.95'
def update(self, timestamp):
    elapsed_time = timestamp - self._last_update if self._last_update else 0.0
    self._last_update = timestamp
    if self.mode not in ('POINT', 'SCAN', 'STOW'):
        return
    az, el = self.pos_actual_scan_azim, self.pos_actual_scan_elev
    target = construct_azel_target(deg2rad(az), deg2rad(90.0)) \
        if self.mode == 'STOW' else self._target
    if not target:
        return
    requested_az, requested_el = target.azel(timestamp, self.ant)
    requested_az = rad2deg(wrap_angle(requested_az))
    requested_el = rad2deg(requested_el)
    delta_az = wrap_angle(requested_az - az, period=360.)
    delta_el = requested_el - el
    # Truncate velocities to slew rate limits and update position
    max_delta_az = self.max_slew_azim_dps * elapsed_time
    max_delta_el = self.max_slew_elev_dps * elapsed_time
    az += min(max(delta_az, -max_delta_az), max_delta_az)
    el += min(max(delta_el, -max_delta_el), max_delta_el)
    # Truncate coordinates to antenna limits
    az = min(max(az, self.real_az_min_deg), self.real_az_max_deg)
    el = min(max(el, self.real_el_min_deg), self.real_el_max_deg)
    # Check angular separation to determine lock
    dish = construct_azel_target(deg2rad(az), deg2rad(el))
    error = rad2deg(target.separation(dish, timestamp, self.ant))
    self.lock = error < self.lock_threshold
    # Update position sensors
    self.pos_request_scan_azim = requested_az
    self.pos_request_scan_elev = requested_el
    self.pos_actual_scan_azim = az
    self.pos_actual_scan_elev = el
def LoadHDF5(HDF5Filename, header=False):
    try:
        d = scape.DataSet(HDF5Filename, baseline=opts.baseline)
    except ValueError:
        print("WARNING: THIS FILE %s IS CORRUPTED AND SCAPE WILL NOT PROCESS IT; "
              "YOU MAY NEED TO RE-AUGMENT IT, BUT THAT IS AN EXPENSIVE TASK!" % HDF5Filename.split('/')[-1])
    else:
        print("SUCCESSFULLY LOADED: welcome to the scape library, which is busy processing your request")
        lo_freq = 4200.0 + d.freqs[len(d.freqs) // 2]
        # Try to check the RFI channels across all the channels
        rfi_chan_across_all = d.identify_rfi_channels()
        d = d.select(freqkeep=range(100, 420))
        # RFI channels across Fringe Finder channels (i.e. frequency range around 100 to 420)
        rfi_channels = d.identify_rfi_channels()
        freqs = d.freqs
        sky_frequency = d.freqs[rfi_channels]
        ant = d.antenna.name
        data_filename = os.path.splitext(os.path.basename(HDF5Filename))[0] + '.h5'
        # obs_date = os.path.splitext(os.path.basename(HDF5Filename))[0]
        # date = time.ctime(float(obs_date))
        for compscan in d.compscans:
            azimuth = np.hstack([scan.pointing['az'] for scan in compscan.scans])
            elevation = np.hstack([scan.pointing['el'] for scan in compscan.scans])
            compscan_times = np.hstack([scan.timestamps for scan in compscan.scans])
            compscan_start_time = np.hstack([scan.timestamps[0] for scan in compscan.scans])
            compscan_end_time = np.hstack([scan.timestamps[-1] for scan in compscan.scans])
            middle_time = np.median(compscan_times, axis=None)
            obs_date = katpoint.Timestamp(middle_time)
            middle_start_time = np.median(compscan_start_time)
            middle_end_time = np.median(compscan_end_time)
            end_time = katpoint.Timestamp(middle_end_time)
            min_compscan_az = katpoint.rad2deg(azimuth.min())
            max_compscan_az = katpoint.rad2deg(azimuth.max())
            min_compscan_el = katpoint.rad2deg(elevation.min())
            max_compscan_el = katpoint.rad2deg(elevation.max())
            start_time = katpoint.Timestamp(middle_start_time)
            requested_azel = compscan.target.azel(middle_time)
            # ant_az = katpoint.rad2deg(np.array(requested_azel[0]))
            # ant_el = katpoint.rad2deg(np.array(requested_azel[1]))
            target = compscan.target.name
            f = open(opts.outfilebase + '.csv', 'a')
            for index in range(0, len(rfi_channels)):
                rfi_chan = rfi_channels[index] + 100
                rfi_freq = freqs[rfi_channels[index]]
                f.write('%s, %s, %s, %s, %s, %f, %f, %f, %f, %f, %d, %f\n' %
                        (data_filename, start_time, end_time, ant, target, min_compscan_az, max_compscan_az,
                         min_compscan_el, max_compscan_el, lo_freq, rfi_chan, rfi_freq))
            f.close()
def check_target(OVST, target, tmstmp=None, check=True):
    '''
    :param OVST: object holding the active antennas (class Antenna) and the catalogue (class Catalogue)
    :param target: str or object of class Target
        if a string, the target is looked up in the catalogue,
        or parsed directly if it has the format e.g. 'azel, 30, 60'
    :param tmstmp: Timestamp
    :param check: bool
        if True, raise an error when the target cannot be observed within the antenna limits
    :return: list with position tuples [(az1, el1), (az2, el2), ...]
    '''
    antennas = OVST.active_antennas
    catalogue = OVST.Catalogue
    azel = []
    if isinstance(target, Target):
        pass
    elif isinstance(target, str) and ',' in target:
        # Check if target has format: e.g. 'azel, 30, 60'
        target = Target(target)
    elif isinstance(target, str):
        target = catalogue[target]
    if not target:
        raise ValueError("Target not in catalogue")
    if isinstance(tmstmp, str):
        if tmstmp and len(tmstmp) == 5:
            tmstmp += ':00'
        if tmstmp and len(tmstmp) == 8:
            tmstmp = str(datetime.now().date()) + ' ' + tmstmp
    if isinstance(tmstmp, (int, float)):
        tmstmp = Timestamp(tmstmp)
    if not tmstmp:
        tmstmp = Timestamp()
    for antenna in antennas:
        ae = target.azel(timestamp=tmstmp, antenna=antenna)
        azel.append([rad2deg(ae[0]), rad2deg(ae[1])])
    az = [item[0] for item in azel]
    el = [item[1] for item in azel]
    if check:
        if all(OVST.az_limit[1] - 2 < i < OVST.az_limit[0] + 2 for i in az) or all(i < OVST.el_limit[0] for i in el):
            raise LookupError(
                'Target cannot be focused at %s (target at azimuth %.2f and elevation %.2f).\n'
                'Allowed limits: az not in range of 150-173 and elevation > 25'
                % (tmstmp.local()[11:19], azel[0][0], azel[0][1]))
    return azel  # format: [(az1, el1), (az2, el2), ...]
def update(fig):
    """Fit new pointing model and update plots."""
    # Perform early redraw to improve interactivity of clicks (which typically change state of target dots)
    # Target state: 0 = flagged, 1 = unflagged, 2 = highlighted
    target_state = keep * ((target_index == fig.highlighted_target) + 1)
    # Specify colours of flagged, unflagged and highlighted dots, respectively, as RGBA tuples
    dot_colors = np.choose(target_state, np.atleast_3d(np.vstack([(1, 1, 1, 1), (0, 0, 1, 1), (1, 0, 0, 1)]))).T
    for ax in fig.axes[:7]:
        ax.dots.set_facecolors(dot_colors)
    fig.canvas.draw()
    # Fit new pointing model and update results
    params, sigma_params = new_model.fit(az[keep], el[keep], measured_delta_az[keep], measured_delta_el[keep],
                                         std_delta_az[keep], std_delta_el[keep], enabled_params)
    new.update(new_model)
    # Update rest of figure
    fig.texts[3].set_text("$\chi^2$ = %.1f" % new.chi2)
    fig.texts[4].set_text("all sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(target_index == fig.highlighted_target)
    fig.texts[5].set_text("target sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(keep)
    fig.texts[-1].set_text(unique_targets[fig.highlighted_target])
    # Update model parameter strings
    for p, param in enumerate(display_params):
        fig.texts[2 * p + 6].set_text(param_to_str(new_model, param) if enabled_params[param] else '')
        # HACK to convert sigmas to arcminutes, but not for P9 and P12 (which are scale factors)
        # This functionality should really reside inside the PointingModel class
        std_param = rad2deg(sigma_params[param]) * 60. if param not in [8, 11] else sigma_params[param]
        std_param_str = ("%.2f'" % std_param) if param not in [8, 11] else ("%.0e" % std_param)
        fig.texts[2 * p + 7].set_text(std_param_str if enabled_params[param] and opts.use_stats else '')
        # Turn parameter string bold if it changed significantly from old value
        if np.abs(params[param] - old_model.values()[param]) > 3.0 * sigma_params[param]:
            fig.texts[2 * p + 6].set_weight('bold')
            fig.texts[2 * p + 7].set_weight('bold')
        else:
            fig.texts[2 * p + 6].set_weight('normal')
            fig.texts[2 * p + 7].set_weight('normal')
    daz_az, del_az, daz_el, del_el, quiver, before, after = fig.axes[:7]
    # Update quiver plot
    quiver_scale = 0.1 * fig.quiver_scale_slider.val * np.pi / 6 / deg2rad(old.robust_sky_rms / 60.)
    quiver.quiv.set_segments(quiver_segments(new.residual_az, new.residual_el, quiver_scale))
    quiver.quiv.set_color(np.choose(keep, np.atleast_3d(np.vstack([(0.3, 0.3, 0.3, 0.2), (0.3, 0.3, 0.3, 1)]))).T)
    # Update residual plots
    daz_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_xel) * 60.])
    del_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_el) * 60.])
    daz_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_xel) * 60.])
    del_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_el) * 60.])
    after.dots.set_offsets(np.c_[np.arctan2(new.residual_el, new.residual_xel), new.abs_sky_error])
    resid_lim = 1.2 * max(new.abs_sky_error.max(), old.abs_sky_error.max())
    daz_az.set_ylim(-resid_lim, resid_lim)
    del_az.set_ylim(-resid_lim, resid_lim)
    daz_el.set_ylim(-resid_lim, resid_lim)
    del_el.set_ylim(-resid_lim, resid_lim)
    before.set_ylim(0, resid_lim)
    after.set_ylim(0, resid_lim)
    # Redraw the figure
    fig.canvas.draw()
def metrics(model, az, el, measured_delta_az, measured_delta_el, std_delta_az, std_delta_el):
    """Determine new residuals and sky RMS from pointing model."""
    model_delta_az, model_delta_el = model.offset(az, el)
    residual_az = measured_delta_az - model_delta_az
    residual_el = measured_delta_el - model_delta_el
    residual_xel = residual_az * np.cos(el)
    abs_sky_error = rad2deg(np.sqrt(residual_xel ** 2 + residual_el ** 2)) * 3600.
    ###### On the calculation of all-sky RMS #####
    # Assume the el and cross-el errors have zero mean, are distributed normally, and are uncorrelated
    # They are therefore described by a 2-dimensional circular Gaussian pdf with zero mean and *per-component*
    # standard deviation of sigma
    # The absolute sky error (== Euclidean length of 2-dim error vector) then has a Rayleigh distribution
    # The RMS sky error has a mean value of sqrt(2) * sigma, since each squared error term is the sum of
    # two squared Gaussian random values, each with an expected value of sigma^2.
    sky_rms = np.sqrt(np.mean(abs_sky_error ** 2))
    # A more robust estimate of the RMS sky error is obtained via the median of the Rayleigh distribution,
    # which is sigma * sqrt(log(4)) -> convert this to the RMS sky error = sqrt(2) * sigma
    robust_sky_rms = np.median(abs_sky_error) * np.sqrt(2. / np.log(4.))
    # The chi^2 value is what is actually optimised by the least-squares fitter (evaluated on the training set)
    chi2 = np.sum(((residual_xel / std_delta_az) ** 2 + (residual_el / std_delta_el) ** 2))
    text = []
    # text.append("$\chi^2$ = %g " % chi2)
    text.append("All sky RMS = %.3f\" (robust %.3f\") " % (sky_rms, robust_sky_rms))
    return sky_rms, robust_sky_rms, chi2, text
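# --- Illustrative check (not from the original scripts) ---------------------
# The comments in metrics() above derive the robust sky RMS from the median of
# a Rayleigh distribution. A quick simulation with made-up numbers confirms
# that median(error) * sqrt(2 / log(4)) approaches the true RMS sqrt(2) * sigma
# for zero-mean, uncorrelated Gaussian el / cross-el errors.
import numpy as np

rng = np.random.default_rng(42)
sigma = 30.0                                   # arcseconds per component (illustrative value)
residual_xel = rng.normal(0.0, sigma, 100000)
residual_el = rng.normal(0.0, sigma, 100000)
abs_sky_error = np.sqrt(residual_xel ** 2 + residual_el ** 2)
sky_rms = np.sqrt(np.mean(abs_sky_error ** 2))
robust_sky_rms = np.median(abs_sky_error) * np.sqrt(2. / np.log(4.))
print(sky_rms, robust_sky_rms, np.sqrt(2.) * sigma)   # all three should agree closely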
def update(self, model):
    """Determine new residuals and sky RMS from pointing model."""
    model_delta_az, model_delta_el = model.offset(az, el)
    self.residual_az = measured_delta_az - model_delta_az
    self.residual_el = measured_delta_el - model_delta_el
    self.residual_xel = self.residual_az * np.cos(el)
    self.abs_sky_error = rad2deg(np.sqrt(self.residual_xel ** 2 + self.residual_el ** 2)) * 60.
    self.metrics(keep)
def fit_primary_beams(session, data_points):
    """Fit primary beams to receptor gains obtained at various offset pointings.

    Parameters
    ----------
    session : :class:`katcorelib.observe.CaptureSession` object
        The active capture session
    data_points : dict mapping receptor index to (x, y, freq, gain, weight) seq
        Complex gains per receptor, as multiple records per offset and frequency

    Returns
    -------
    beams : dict mapping receptor name to list of :class:`BeamPatternFit`
        Fitted primary beams, per receptor and per frequency chunk

    """
    beams = {}
    # Iterate over receptors
    for a in data_points:
        data = np.rec.fromrecords(data_points[a], names='x,y,freq,gain,weight')
        data = data.reshape(-1, NUM_CHUNKS)
        ant = session.observers[a]
        # Iterate over frequency chunks but discard typically dodgy band edges
        for chunk in range(1, NUM_CHUNKS - 1):
            chunk_data = data[:, chunk]
            is_valid = np.nonzero(~np.isnan(chunk_data['gain']) & (chunk_data['weight'] > 0.))[0]
            chunk_data = chunk_data[is_valid]
            if len(chunk_data) == 0:
                continue
            expected_width = rad2deg(ant.beamwidth * lightspeed / chunk_data['freq'][0] / ant.diameter)
            # Convert power beamwidth to gain / voltage beamwidth
            expected_width = np.sqrt(2.0) * expected_width
            # XXX This assumes we are still using default ant.beamwidth of 1.22
            # and also handles larger effective dish diameter in H direction
            expected_width = (0.8 * expected_width, 0.9 * expected_width)
            beam = BeamPatternFit((0., 0.), expected_width, 1.0)
            x = np.c_[chunk_data['x'], chunk_data['y']].T
            y = chunk_data['gain']
            std_y = np.sqrt(1. / chunk_data['weight'])
            try:
                beam.fit(x, y, std_y)
            except TypeError:
                continue
            beamwidth_norm = beam.width / np.array(expected_width)
            center_norm = beam.center / beam.std_center
            user_logger.debug("%s %2d %2d: height=%4.2f width=(%4.2f, %4.2f) "
                              "center=(%7.2f, %7.2f)%s", ant.name, chunk, len(y),
                              beam.height, beamwidth_norm[0], beamwidth_norm[1],
                              center_norm[0], center_norm[1],
                              ' X' if not beam.is_valid else '')
            # Store beam per frequency chunk and per receptor
            beams_freq = beams.get(ant.name, [None] * NUM_CHUNKS)
            beams_freq[chunk] = beam
            beams[ant.name] = beams_freq
    return beams
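# --- Illustrative check (not from the original scripts) ---------------------
# fit_primary_beams() scales the expected power beamwidth by sqrt(2) to get a
# gain / voltage beamwidth. A stand-alone check of that factor with made-up
# numbers: the voltage beam is the square root of the power beam, so for a
# Gaussian beam shape its FWHM is sqrt(2) times wider.
import numpy as np

fwhm_power = 1.0                                     # degrees, illustrative
sigma_power = fwhm_power / (2. * np.sqrt(2. * np.log(2.)))
x = np.linspace(-3., 3., 100001)
power_beam = np.exp(-0.5 * (x / sigma_power) ** 2)
voltage_beam = np.sqrt(power_beam)

def fwhm(x, y):
    """Width of the region where y is above half of its peak."""
    above = x[y >= 0.5 * y.max()]
    return above[-1] - above[0]

print(fwhm(x, voltage_beam) / fwhm(x, power_beam))   # ~ sqrt(2) ~ 1.414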
def _target_azel(self, target):
    """Get azimuth and elevation co-ordinates for a target at the current time.

    Parameters
    ----------
    target: katpoint.Target
        The target of interest.

    Returns
    -------
    az: float
        The azimuth co-ordinate of the target in degrees.
    el: float
        The elevation co-ordinate of the target in degrees.
    """
    az, el = target.azel(simobserver.date)
    az = katpoint.rad2deg(az)
    el = katpoint.rad2deg(el)
    return az, el
def metrics(model, az, el, measured_delta_az, measured_delta_el, std_delta_az, std_delta_el, time_stamps):
    """Determine new residuals and sky RMS from pointing model."""
    model_delta_az, model_delta_el = model.offset(az, el)
    residual_az = measured_delta_az - model_delta_az
    residual_el = measured_delta_el - model_delta_el
    residual_xel = residual_az * np.cos(el)
    abs_sky_error = rad2deg(np.sqrt(residual_xel ** 2 + residual_el ** 2))
    offset_az_ts = pandas.Series(rad2deg(residual_xel), pandas.to_datetime(time_stamps, unit='s'))  # .asfreq(freq='1s')
    offset_el_ts = pandas.Series(rad2deg(residual_el), pandas.to_datetime(time_stamps, unit='s'))  # .asfreq(freq='1s')
    offset_total_ts = pandas.Series(abs_sky_error, pandas.to_datetime(time_stamps, unit='s'))  # .asfreq(freq='1s')
    ###### On the calculation of all-sky RMS #####
    # Assume the el and cross-el errors have zero mean, are distributed normally, and are uncorrelated
    # They are therefore described by a 2-dimensional circular Gaussian pdf with zero mean and *per-component*
    # standard deviation of sigma
    # The absolute sky error (== Euclidean length of 2-dim error vector) then has a Rayleigh distribution
    # The RMS sky error has a mean value of sqrt(2) * sigma, since each squared error term is the sum of
    # two squared Gaussian random values, each with an expected value of sigma^2.
    sky_rms = np.sqrt(np.mean(abs_sky_error ** 2))
    # A more robust estimate of the RMS sky error is obtained via the median of the Rayleigh distribution,
    # which is sigma * sqrt(log(4)) -> convert this to the RMS sky error = sqrt(2) * sigma
    robust_sky_rms = np.median(abs_sky_error) * np.sqrt(2. / np.log(4.))
    # The chi^2 value is what is actually optimised by the least-squares fitter (evaluated on the training set)
    # chi2 = np.sum(((residual_xel / std_delta_az) ** 2 + (residual_el / std_delta_el) ** 2))
    text = 'All sky RMS = %.3f\" (robust %.3f\") ' % (sky_rms * 3600, robust_sky_rms * 3600)
    fig = plt.figure(figsize=(10, 5))
    # change_total = np.sqrt(change_el**2 + change_az**2)
    # (offset_el_ts * 3600.).plot(label='Elevation', legend=True, grid=True, style='*')
    # (offset_az_ts * 3600.).plot(label='Azimuth', legend=True, grid=True, style='*')
    (offset_total_ts * 3600.).plot(label='Total pointing Error', legend=True, grid=True, style='*')
    dataset_str = ' ,'.join(np.unique(offsetdata['dataset']).tolist())
    # target_str = ' ,'.join(np.unique(offsetdata['target']).tolist())
    plt.title("Offset for Antenna:%s Dataset:%s \n %s " % (ant.name, dataset_str, text), fontsize=10)
    plt.ylabel('Offset (arc-seconds)')
    plt.xlabel('Time (UTC)', fontsize=8)
    plt.figtext(0.89, 0.18, git_info(), horizontalalignment='right', fontsize=10)
    return fig
def select_and_average(filename, average_time):
    # Read a file into katdal, and average the data to the prescribed averaging time
    # Returns the weather data and timestamps with the correct averaging interval
    data = katdal.open(filename)

    raw_timestamps = data.sensor.timestamps
    raw_wind_speed = data.wind_speed
    raw_temperature = data.temperature
    raw_dumptime = data.dump_period

    # Get azel of each antenna and separation of each antenna
    sun = katpoint.Target('Sun, special', antenna=data.ants[0])
    alltimestamps = data.timestamps[:]
    solar_seps = np.zeros_like(alltimestamps)
    for dumpnum, timestamp in enumerate(alltimestamps):
        azeltarget = katpoint.construct_azel_target(katpoint.deg2rad(data.az[dumpnum, 0]),
                                                    katpoint.deg2rad(data.el[dumpnum, 0]))
        azeltarget.antenna = data.ants[0]
        solar_seps[dumpnum] = katpoint.rad2deg(azeltarget.separation(sun, timestamp))

    # Determine number of dumps to average
    num_average = max(int(np.round(average_time / raw_dumptime)), 1)

    # Array of block indices
    indices = list(range(min(num_average, raw_timestamps.shape[0]),
                         raw_timestamps.shape[0] + 1,
                         min(num_average, raw_timestamps.shape[0])))

    timestamps = np.average(np.array(np.split(raw_timestamps, indices)[:-1]), axis=1)
    wind_speed = np.average(np.array(np.split(raw_wind_speed, indices)[:-1]), axis=1)
    temperature = np.average(np.array(np.split(raw_temperature, indices)[:-1]), axis=1)
    dump_time = raw_dumptime * num_average

    return (timestamps, alltimestamps, wind_speed, temperature, dump_time, solar_seps, data.ants[0])
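# --- Illustrative sketch (not from the original scripts) --------------------
# select_and_average() relies on the np.split pattern above to average fixed
# size blocks of samples, discarding the ragged tail block via the [:-1] slice.
# A minimal stand-alone illustration of that pattern with synthetic numbers:
import numpy as np

raw = np.arange(10.0)            # 10 samples
num_average = 3                  # average in blocks of 3; the last lone sample is dropped
indices = list(range(num_average, raw.shape[0] + 1, num_average))
blocks = np.split(raw, indices)[:-1]        # [0 1 2], [3 4 5], [6 7 8]
averaged = np.array(blocks).mean(axis=1)    # [1. 4. 7.]
print(averaged)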
def referencemetrics(measured_delta_az, measured_delta_el):
    """Determine the reference sky RMS from the pointing model."""
    text = []
    measured_delta_xel = measured_delta_az * np.cos(el)  # scale due to sky shape
    abs_sky_error = np.ma.array(data=measured_delta_xel, mask=False)
    for target in set(offsetdata['target']):
        keep = np.ones((len(offsetdata)), dtype=bool)
        for key, targetv in enumerate(offsetdata['target']):
            keep[key] = target == targetv
        abs_sky_error[keep] = rad2deg(np.sqrt((measured_delta_xel[keep] - measured_delta_xel[keep][0]) ** 2 +
                                              (measured_delta_el[keep] - measured_delta_el[keep][0]) ** 2)) * 60.
        abs_sky_error.mask[keep.nonzero()[0][0]] = True  # Mask the reference element
        text.append("Test Target: '%s' Reference RMS = %.3f' (robust %.3f') (N=%i Data Points)"
                    % (target, np.sqrt((abs_sky_error[keep] ** 2).mean()),
                       np.ma.median(abs_sky_error[keep]) * np.sqrt(2. / np.log(4.)), keep.sum() - 1))
    ###### On the calculation of all-sky RMS #####
    # Assume the el and cross-el errors have zero mean, are distributed normally, and are uncorrelated
    # They are therefore described by a 2-dimensional circular Gaussian pdf with zero mean and *per-component*
    # standard deviation of sigma
    # The absolute sky error (== Euclidean length of 2-dim error vector) then has a Rayleigh distribution
    # The RMS sky error has a mean value of sqrt(2) * sigma, since each squared error term is the sum of
    # two squared Gaussian random values, each with an expected value of sigma^2.
    sky_rms = np.sqrt(np.ma.mean(abs_sky_error ** 2))
    # print abs_sky_error
    # A more robust estimate of the RMS sky error is obtained via the median of the Rayleigh distribution,
    # which is sigma * sqrt(log(4)) -> convert this to the RMS sky error = sqrt(2) * sigma
    robust_sky_rms = np.ma.median(abs_sky_error) * np.sqrt(2. / np.log(4.))
    text.append("All Sky Reference RMS = %.3f' (robust %.3f') (N=%i Data Points) R.T.P.4"
                % (sky_rms, robust_sky_rms, abs_sky_error.count()))
    return text
def test_pointing(self):
    az, el = self.target.azel(self.timestamps, self.antennas[1])
    assert_array_equal(self.dataset.az[:, 1], rad2deg(az))
    assert_array_equal(self.dataset.el[:, 1], rad2deg(el))
    ra, dec = self.target.radec(self.timestamps, self.antennas[0])
    assert_array_almost_equal(self.dataset.ra[:, 0], rad2deg(ra), decimal=5)
    assert_array_almost_equal(self.dataset.dec[:, 0], rad2deg(dec), decimal=5)
    angle = self.target.parallactic_angle(self.timestamps, self.antennas[0])
    # TODO: Check why this is so poor... see SR-1882 for progress on this
    assert_array_almost_equal(self.dataset.parangle[:, 0], rad2deg(angle), decimal=0)
    x, y = self.target.sphere_to_plane(az, el, self.timestamps, self.antennas[1])
    assert_array_equal(self.dataset.target_x[:, 1], rad2deg(x))
    assert_array_equal(self.dataset.target_y[:, 1], rad2deg(y))
# track the Moon for a short time
session.label('track')
user_logger.info("Initiating %g-second track on target %s"
                 % (opts.track_duration, target.name,))
session.set_target(target)  # Set the target
session.track(target, duration=opts.track_duration, announce=False)  # Set the target & mode = point

## 1) Track ephem target behind the moon
user_logger.info("Sleeping for 2 minutes")
time.sleep(120)
user_logger.info("Setting to Ephem target")
observer.date = ephem.now()
moon = ephem.Moon(observer)
moon.compute(observer)
target = katpoint.construct_radec_target(moon.ra, moon.dec)
session.label('track')
user_logger.info("Initiating %g-second track on ephem target (%.2f, %.2f)"
                 % (opts.track_duration, katpoint.rad2deg(float(moon.ra)),
                    katpoint.rad2deg(float(moon.dec)),))
session.set_target(target)  # Set the target
session.track(target, duration=opts.track_duration, announce=False)  # Set the target & mode = point

## 2) Track moon again
user_logger.info("Sleeping for 2 minutes")
time.sleep(120)
for target in observation_sources.iterfilter(el_limit_deg=opts.horizon):
    user_logger.info(target)
    # track the Moon for a short time
    session.label('track')
    user_logger.info("Repeating %g-second track on target %s"
                     % (opts.track_duration, target.name,))
    session.set_target(target)  # Set the target
    session.track(target, duration=opts.track_duration, announce=False)  # Set the target & mode = point
markers = []
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
pointtypes = ['o', '*', 'x', '^', 's', 'p', 'h', '+', 'D', 'd', 'v', 'H', 'd', 'v']
for point in pointtypes:
    for color in colors:
        markers.append(str(color + point))

cat = katpoint.Catalogue(open(cat_filename), add_specials=False)
# cat.add('Sun, special')  # except maybe the sun
cat.antenna = katpoint.Antenna('ant1, -30:43:17.3, 21:24:38.5, 1038.0, 12.0, 18.4 -8.7 0.0, -0:05:30.6 0 -0:00:03.3 0:02:14.2 0:00:01.6 -0:01:30.6 0:08:42.1, 1.22')
target = cat.targets[0]
t = katpoint.Timestamp().secs + np.arange(0, 24. * 60. * 60., 360.)
lst = katpoint.rad2deg(target.antenna.local_sidereal_time(t)) / 15

fig = plt.figure(1)
plt.clf()
fig.set_size_inches(12, 4)
plt.subplots_adjust(right=0.8)
lines = list()
labels = list()
count = 0

fontP = FontProperties()
fontP.set_size('small')

for target in cat.targets:
    count = count + 1
    elev = katpoint.rad2deg(target.azel(t)[1])
def select_environment(data, antenna, condition="normal"): """ Flag data for environmental conditions. Options are: normal: Wind < 9.8m/s, -5C < Temperature < 40C, DeltaTemp < 3deg in 20 minutes optimal: Wind < 2.9m/s, -5C < Temperature < 35C, DeltaTemp < 2deg in 10 minutes ideal: Wind < 1m/s, 19C < Temp < 21C, DeltaTemp < 1deg in 30 minutes """ # Convert timestamps to UTCseconds using katpoint timestamps = np.array( [katpoint.Timestamp(timestamp) for timestamp in data["timestamp_ut"]], dtype='float32') # Fit a smooth function (cubic spline) in time to the temperature and wind data raw_wind = data["wind_speed"] raw_temp = data["temperature"] fit_wind = interpolate.InterpolatedUnivariateSpline(timestamps, raw_wind, k=3) fit_temp = interpolate.InterpolatedUnivariateSpline(timestamps, raw_temp, k=3) #fit_temp_grad = fit_temp.derivative() # Day/Night # Night is defined as when the Sun is at -5deg. # Set up Sun target sun = katpoint.Target('Sun, special', antenna=antenna) sun_elevation = katpoint.rad2deg(sun.azel(timestamps)[1]) # Apply limits on environmental conditions good = [True] * data.shape[0] # Set up limits on environmental conditions if condition == 'ideal': windlim = 1. temp_low = 19. temp_high = 21. deltatemp = 1. / (30. * 60.) sun_elev_lim = -5. elif condition == 'optimum': windlim = 2.9 temp_low = -5. temp_high = 35. deltatemp = 2. / (10. * 60.) sun_elev_lim = -5. elif condition == 'normal': windlim = 9.8 temp_low = -5. temp_high = 40. deltatemp = 3. / (20. * 60.) sun_elev_lim = 100. #Daytime else: return good good = good & (fit_wind(timestamps) < windlim) good = good & ((fit_temp(timestamps) > temp_low) & (fit_temp(timestamps) < temp_high)) #Get the temperature gradient temp_grad = [ fit_temp.derivatives(timestamp)[1] for timestamp in timestamps ] good = good & (np.abs(temp_grad) < deltatemp) #Day or night? good = good & (sun_elevation < sun_elev_lim) return good
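# --- Illustrative sketch (not from the original scripts) --------------------
# select_environment() reads the temperature gradient off the cubic-spline fit
# via its derivatives() method. A minimal stand-alone illustration of that
# scipy pattern on synthetic temperature samples (all values are made up):
import numpy as np
from scipy import interpolate

t = np.arange(0., 3600., 60.)              # one sample per minute for an hour
temp = 20.0 + 2.0 * t / 3600.              # temperature drifting by 2 C per hour
fit_temp = interpolate.InterpolatedUnivariateSpline(t, temp, k=3)
# derivatives(x)[1] is the first derivative of the spline at x, in deg C per
# second, which is what gets compared against the deltatemp threshold above
temp_grad = np.array([fit_temp.derivatives(ts)[1] for ts in t])
print(temp_grad.mean() * 3600.)            # ~ 2 C per hour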
plt.subplot(121)
plot_times = np.arange(gain_times[0] - 1000, gain_times[-1] + 1000, 100.)
for n in range(4):
    plt.plot(plot_times - gain_times[0], amp_interps[n](plot_times), 'k')
    plt.plot(gain_times - gain_times[0], np.abs(ant_gains[n]), 'o', label='ant%d' % (n + 1))
plt.xlabel('Time since start (seconds)')
plt.title('Gain amplitude')
plt.legend(loc='upper left')

plt.subplot(122)
for n in range(4):
    plt.plot(plot_times - gain_times[0],
             katpoint.rad2deg(scape.stats.angle_wrap(phase_interps[n](plot_times))), 'k')
    plt.plot(gain_times - gain_times[0], katpoint.rad2deg(np.angle(ant_gains[n])), 'o', label='ant%d' % (n + 1))
plt.xlabel('Time since start (seconds)')
plt.title('Gain phase (degrees)')
plt.legend(loc='lower left')

# Apply both bandpass and gain calibration to cal source visibilities
final_cal_vis_samples = [vis.copy() for vis in cal_vis_samples]
for vis, timestamps in zip(final_cal_vis_samples, cal_timestamps):
    # Interpolate antenna gains to timestamps of visibilities
    interp_ant_gains = np.zeros((4, len(timestamps)), dtype=np.complex64)
    for n in range(4):
def calc_pointing_offsets(session, beams, target, middle_time,
                          temperature, pressure, humidity):
    """Calculate pointing offsets per receptor based on primary beam fits.

    Parameters
    ----------
    session : :class:`katcorelib.observe.CaptureSession` object
        The active capture session
    beams : dict mapping receptor name to list of :class:`BeamPatternFit`
        Fitted primary beams, per receptor and per frequency chunk
    target : :class:`katpoint.Target` object
        The target on which offset pointings were done
    middle_time : float
        Unix timestamp at the middle of sequence of offset pointings, used to
        find the mean location of a moving target (and reference for weather)
    temperature, pressure, humidity : float
        Atmospheric conditions at middle time, used for refraction correction

    Returns
    -------
    pointing_offsets : dict mapping receptor name to offset data (10 floats)
        Pointing offsets per receptor in degrees, stored as a sequence of
          - requested (az, el) after refraction (input to the pointing model),
          - full (az, el) offset, including contributions of existing pointing
            model, any existing adjustment and newly fitted adjustment
            (useful for fitting new pointing models as it is independent),
          - full (az, el) adjustment on top of existing pointing model,
            replacing any existing adjustment (useful for reference pointing),
          - relative (az, el) adjustment on top of existing pointing model and
            adjustment (useful for verifying reference pointing), and
          - rough uncertainty (standard deviation) of (az, el) adjustment.

    """
    pointing_offsets = {}
    # Iterate over receptors
    for ant in sorted(session.observers):
        beams_freq = beams.get(ant.name, [])
        beams_freq = [b for b in beams_freq if b is not None and b.is_valid]
        if not beams_freq:
            user_logger.debug("%s had no valid primary beam fitted", ant.name)
            continue
        offsets_freq = np.array([b.center for b in beams_freq])
        offsets_freq_std = np.array([b.std_center for b in beams_freq])
        weights_freq = 1. / offsets_freq_std ** 2
        # Do weighted average of offsets over frequency chunks
        results = np.average(offsets_freq, axis=0, weights=weights_freq, returned=True)
        pointing_offset = results[0]
        pointing_offset_std = np.sqrt(1. / results[1])
        user_logger.debug("%s x=%+7.2f'+-%.2f\" y=%+7.2f'+-%.2f\"", ant.name,
                          pointing_offset[0] * 60, pointing_offset_std[0] * 3600,
                          pointing_offset[1] * 60, pointing_offset_std[1] * 3600)
        # Get existing pointing adjustment
        receptor = getattr(session.kat, ant.name)
        az_adjust = receptor.sensor.pos_adjust_pointm_azim.get_value()
        el_adjust = receptor.sensor.pos_adjust_pointm_elev.get_value()
        existing_adjustment = deg2rad(np.array((az_adjust, el_adjust)))
        # Start with requested (az, el) coordinates, as they apply
        # at the middle time for a moving target
        requested_azel = target.azel(timestamp=middle_time, antenna=ant)
        # Correct for refraction, which becomes the requested value
        # at input of pointing model
        rc = RefractionCorrection()

        def refract(az, el):  # noqa: E306, E301
            """Apply refraction correction as at the middle of scan."""
            return [az, rc.apply(el, temperature, pressure, humidity)]
        refracted_azel = np.array(refract(*requested_azel))
        # More stages that apply existing pointing model and/or adjustment
        pointed_azel = np.array(ant.pointing_model.apply(*refracted_azel))
        adjusted_azel = pointed_azel + existing_adjustment
        # Convert fitted offset back to spherical (az, el) coordinates
        pointing_offset = deg2rad(np.array(pointing_offset))
        beam_center_azel = target.plane_to_sphere(*pointing_offset,
                                                  timestamp=middle_time, antenna=ant)
        # Now correct the measured (az, el) for refraction and then apply the
        # existing pointing model and adjustment to get a "raw" measured
        # (az, el) at the output of the pointing model stage
        beam_center_azel = refract(*beam_center_azel)
        beam_center_azel = ant.pointing_model.apply(*beam_center_azel)
        beam_center_azel = np.array(beam_center_azel) + existing_adjustment
        # Make sure the offset is a small angle around 0 degrees
        full_offset_azel = wrap_angle(beam_center_azel - refracted_azel)
        full_adjust_azel = wrap_angle(beam_center_azel - pointed_azel)
        relative_adjust_azel = wrap_angle(beam_center_azel - adjusted_azel)
        # Cheap 'n' cheerful way to convert cross-el uncertainty to azim form
        offset_azel_std = pointing_offset_std / np.array([np.cos(refracted_azel[1]), 1.])
        # We store all variants of the pointing offset since we have it all
        # at our fingertips here
        point_data = np.r_[rad2deg(refracted_azel), rad2deg(full_offset_azel),
                           rad2deg(full_adjust_azel), rad2deg(relative_adjust_azel),
                           offset_azel_std]
        pointing_offsets[ant.name] = point_data
    return pointing_offsets
sources = katpoint.Catalogue(add_specials=False)
user_logger.info('Performing flux calibration')
ra, dec = target.apparent_radec(timestamp=timenow)
targetName = target.name.replace(" ", "")
print(targetName)
target.name = targetName + '_O'
sources.add(target)

if opts.cal == 'fluxN':
    timenow = katpoint.Timestamp()
    sources = katpoint.Catalogue(add_specials=False)
    user_logger.info('Performing flux calibration')
    ra, dec = target.apparent_radec(timestamp=timenow)
    print(target)
    print("ra %f, dec %f" % (katpoint.rad2deg(ra), katpoint.rad2deg(dec)))
    dec2 = dec + katpoint.deg2rad(1)
    print(dec2, dec)
    decS = dec - katpoint.deg2rad(1)
    targetName = target.name.replace(" ", "")
    print(targetName)
    print("newra %f newdec %f" % (katpoint.rad2deg(ra), katpoint.rad2deg(dec)))
    Ntarget = katpoint.construct_radec_target(ra, dec2)
    Ntarget.antenna = bf_ants
    Ntarget.name = targetName + '_N'
    sources.add(Ntarget)

if opts.cal == 'fluxS':
    timenow = katpoint.Timestamp()
ra, dec = [], []
for scan in d.scans:
    if scan.baseline:
        ra_dec = np.array([katpoint.construct_azel_target(az, el).radec(t, d.antenna)
                           for az, el, t in zip(scan.pointing['az'], scan.pointing['el'], scan.timestamps)])
        x, y = target.sphere_to_plane(ra_dec[:, 0], ra_dec[:, 1], scan.timestamps, coord_system='radec')
        ra.append(x)
        dec.append(y)
# Remove pointing offset (order of a few arcminutes)
ra = katpoint.rad2deg(np.hstack(ra) - d.compscans[0].beam.center[0])
dec = katpoint.rad2deg(np.hstack(dec) - d.compscans[0].beam.center[1])
power = np.hstack([scan.pol('I').squeeze() - scan.baseline(scan.timestamps)
                   for scan in d.scans if scan.baseline])
power = np.abs(power)

# Grid the raster scan to projected plane
min_num_pixels = 201
interp = scape.fitting.Delaunay2DScatterFit(default_val=0.0, jitter=True)
interp.fit([ra, dec], power)
ra_range, dec_range = ra.max() - ra.min(), dec.max() - dec.min()
# Use a square pixel size in projected plane
pixel_size = min(ra_range, dec_range) / min_num_pixels
grid_ra = np.arange(ra.min(), ra.max(), pixel_size)
if compscan.beam is not None and d.data_unit == 'K':
    gain_hh = compscan.beam.height / average_flux
    baseline_hh = compscan.baseline_height()
if (ant.name + 'V') in h5.inputs:
    d.fit_beams_and_baselines(pol='VV', circular_beam=False)
    if compscan.beam is not None and d.data_unit == 'K':
        gain_vv = compscan.beam.height / average_flux
        baseline_vv = compscan.baseline_height()
d.fit_beams_and_baselines(pol='I', circular_beam=True)
beam = compscan.beam
# Obtain middle timestamp of compound scan, where all pointing calculations are done
compscan_times = np.hstack([scan.timestamps for scan in compscan.scans])
middle_time = np.median(compscan_times, axis=None)
# Start with requested (az, el) coordinates, as they apply at the middle time for a moving target
requested_azel = compscan.target.azel(middle_time)
requested_azel = katpoint.rad2deg(np.array(requested_azel))
# The offset is very simplistic and doesn't take into account refraction (see a_p_s_s for more correct way)
offset_azel = katpoint.rad2deg(np.array(beam.center)) if beam else np.zeros(2)
user_logger.info("Antenna %s" % (ant.name,))
user_logger.info("------------")
user_logger.info("Target = '%s', azel around (%.1f, %.1f) deg"
                 % (compscan.target.name, requested_azel[0], requested_azel[1]))
if beam is None:
    user_logger.info("No total power beam found")
else:
    user_logger.info("Beam height = %g %s" % (beam.height, d.data_unit))
    user_logger.info("Beamwidth = %.1f' (expected %.1f')"
                     % (60 * katpoint.rad2deg(beam.width), 60 * katpoint.rad2deg(beam.expected_width)))
    user_logger.info("Beam offset = (%.1f', %.1f') (expected (0', 0'))"
                     % (60 * offset_azel[0], 60 * offset_azel[1]))
              duration=opts.track_duration, announce=False)  # Set the target & mode = point

## 1) Track ephem target behind the moon
user_logger.info("Sleeping for 2 minutes")
time.sleep(120)
user_logger.info("Setting to Ephem target")
observer.date = ephem.now()
moon = ephem.Moon(observer)
moon.compute(observer)
target = katpoint.construct_radec_target(moon.ra, moon.dec)
session.label('track')
user_logger.info("Initiating %g-second track on ephem target (%.2f, %.2f)"
                 % (opts.track_duration,
                    katpoint.rad2deg(float(moon.ra)),
                    katpoint.rad2deg(float(moon.dec)),))
session.set_target(target)  # Set the target
session.track(target, duration=opts.track_duration, announce=False)  # Set the target & mode = point

## 2) Track off target
user_logger.info("Sleeping for 5 minutes")
time.sleep(300)
target = katpoint.construct_radec_target(moon.ra, moon.dec)
session.label('track')
user_logger.info("Second set of %g-second track on ephem target (%.2f, %.2f)"
                 % (opts.track_duration,
ax.ann = ax.annotate('', xy=(0., 0.), xycoords='data', xytext=(32, 32),
                     textcoords='offset points', size=14, va='bottom', ha='center',
                     bbox=dict(boxstyle='round4', fc='w'), visible=False, zorder=5,
                     arrowprops=dict(arrowstyle='-|>', shrinkB=10,
                                     connectionstyle='arc3,rad=-0.2', fc='w', zorder=4))

# Set up figure with buttons
plt.ion()
fig = plt.figure(1, figsize=(15, 10))
fig.clear()
# Store highlighted target index on figure object
fig.highlighted_target = 0
# Axes to contain detail residual plots - initialise plots with old residuals
ax = fig.add_axes([0.27, 0.74, 0.2, 0.2])
ax.axhline(0, color='k', zorder=0)
plot_data_and_tooltip(ax, rad2deg(az), rad2deg(old.residual_xel) * 60.)
ax.axis([-180., 180., -resid_lim, resid_lim])
ax.set_xticks([])
ax.yaxis.set_ticks_position('right')
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(arcmin_formatter))
ax.set_ylabel('Cross-EL offset')
ax.set_title('RESIDUALS')
ax = fig.add_axes([0.27, 0.54, 0.2, 0.2])
ax.axhline(0, color='k', zorder=0)
plot_data_and_tooltip(ax, rad2deg(az), rad2deg(old.residual_el) * 60.)
ax.axis([-180., 180., -resid_lim, resid_lim])
ax.set_xlabel('Azimuth (deg)')
ax.yaxis.set_ticks_position('right')
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(arcmin_formatter))
ax.set_ylabel('EL offset')
def referencemetrics(ant, data, num_samples_limit=1, power_sample_limit=0):
    """Determine the reference sky RMS from the antenna pointing model.

    On the calculation of all-sky RMS:
    Assume the el and cross-el errors have zero mean, are distributed normally, and are uncorrelated.
    They are therefore described by a 2-dimensional circular Gaussian pdf with zero mean and *per-component*
    standard deviation of sigma.
    The absolute sky error (== Euclidean length of 2-dim error vector) then has a Rayleigh distribution.
    The RMS sky error has a mean value of sqrt(2) * sigma, since each squared error term is the sum of
    two squared Gaussian random values, each with an expected value of sigma^2.
    e.g. sky_rms = np.sqrt(np.mean((abs_sky_error - abs_sky_error.mean()) ** 2))
    A more robust estimate of the RMS sky error is obtained via the median of the Rayleigh distribution,
    which is sigma * sqrt(log(4)) -> convert this to the RMS sky error = sqrt(2) * sigma
    e.g. robust_sky_rms = np.median(np.sqrt((abs_sky_error - abs_sky_error.mean()) ** 2)) * np.sqrt(2. / np.log(4.))
    """
    # print type(data.shape[0]), type(num_samples_limit)
    beam = data['beam_height_I'].mean()
    good_beam = (data['beam_height_I'] > beam * .8) * (data['beam_height_I'] < beam * 1.2) * \
                (data['beam_height_I'] > power_sample_limit)
    data = data[good_beam]
    if data.shape[0] > 0 and not np.all(good_beam):
        print("bad scan", data['target'][0])
    if data.shape[0] >= num_samples_limit and (data['timestamp'][-1] - data['timestamp'][0]) < 2000:
        # check all fitted Ipks are valid
        condition_str = ['ideal', 'optimal', 'normal', 'other']
        condition = 3
        text = []
        # azimuth, elevation, delta_azimuth, delta_azimuth_std, delta_elevation, delta_elevation_std,
        measured_delta_xel = data['delta_azimuth'] * np.cos(data['elevation'])  # scale due to sky shape
        abs_sky_error = measured_delta_xel
        model_delta_az, model_delta_el = ant.pointing_model.offset(data['azimuth'], data['elevation'])
        residual_az = data['delta_azimuth'] - model_delta_az
        residual_el = data['delta_elevation'] - model_delta_el
        residual_xel = residual_az * np.cos(data['elevation'])
        delta_xel_std = data['delta_azimuth_std'] * np.cos(data['elevation'])
        abs_sky_delta_std = rad2deg(np.sqrt(delta_xel_std ** 2 + data['delta_azimuth_std'] ** 2)) * 3600  # arcseconds
        # for i, val in enumerate(data):
        #     print("Test Target: '%s' fit accuracy %.3f\"" % (data['target'][i], abs_sky_delta_std[i]))
        abs_sky_error = rad2deg(np.sqrt((residual_xel) ** 2 + (residual_el) ** 2)) * 3600
        condition = get_condition(data)
        rms = np.std(abs_sky_error)
        robust = np.median(np.abs(abs_sky_error - abs_sky_error.mean())) * np.sqrt(2. / np.log(4.))
        text.append("Dataset:%s Test Target: '%s' Reference RMS = %.3f\" {fit-accuracy=%.3f\"} (robust %.3f\") "
                    "(N=%i Data Points) ['%s']"
                    % (data['dataset'][0], data['target'][0], rms, np.mean(abs_sky_delta_std),
                       robust, data.shape[0], condition_str[condition]))
        output_data = data[0].copy()  # make a copy of the rec array
        for i, x in enumerate(data[0]):  # make an average of data
            if x.dtype.kind == 'f':  # average floats
                output_data[i] = data.field(i).mean()
            else:
                output_data[i] = data.field(i)[0]
        sun = Target('Sun,special')
        source = Target('%s,azel, %f,%f' % (output_data['target'],
                                            np.degrees(output_data['azimuth']),
                                            np.degrees(output_data['elevation'])))
        sun_sep = np.degrees(source.separation(sun, timestamp=output_data['timestamp'], antenna=ant))
        output_data = recfunctions.append_fields(output_data, 'sun_sep', np.array([sun_sep]),
                                                 dtypes=float, usemask=False, asrecarray=True)
        output_data = recfunctions.append_fields(output_data, 'condition', np.array([condition]),
                                                 dtypes=float, usemask=False, asrecarray=True)
        output_data = recfunctions.append_fields(output_data, 'rms', np.array([rms]),
                                                 dtypes=float, usemask=False, asrecarray=True)
        output_data = recfunctions.append_fields(output_data, 'robust', np.array([robust]),
                                                 dtypes=float, usemask=False, asrecarray=True)
        output_data = recfunctions.append_fields(output_data, 'N', np.array([data.shape[0]]),
                                                 dtypes=float, usemask=False, asrecarray=True)
        #### Debugging
        # residual_az = data['delta_azimuth'] - model_delta_az
        # residual_el = data['delta_elevation'] - model_delta_el
        # residual_xel = residual_az * np.cos(data['elevation'])
        output_data = recfunctions.append_fields(output_data, 'residual_az',
                                                 np.array([rad2deg(residual_az.std()) * 3600]),
                                                 dtypes=float, usemask=False, asrecarray=True)
        output_data = recfunctions.append_fields(output_data, 'residual_el',
                                                 np.array([rad2deg(residual_el.std()) * 3600]),
                                                 dtypes=float, usemask=False, asrecarray=True)
        output_data = recfunctions.append_fields(output_data, 'residual_xel',
                                                 np.array([rad2deg(residual_xel.std()) * 3600]),
                                                 dtypes=float, usemask=False, asrecarray=True)
        # print("%10s %i %3.1f, %s" % (data['target'][0], data['timestamp'][-1] - data['timestamp'][0], rms,
        #       str(np.degrees(data['delta_elevation'] - data['delta_elevation'].mean()) * 3600)))
        output_data['wind_speed'] = data['wind_speed'].max()
        return text, output_data
    else:
        return None, None
def reduce_compscan_inf(h5, channel_mask=None, chunks=16, return_raw=False, use_weights=False, compscan_index=None, debug=False): """Break the band up into chunks""" chunk_size = chunks rfi_static_flags = np.tile(False, h5.shape[0]) if len(channel_mask) > 0: pickle_file = open(channel_mask, "rb") rfi_static_flags = pickle.load(pickle_file) pickle_file.close() gains_p = {} stdv = {} calibrated = False # placeholder for calibration h5.select(compscans=compscan_index) a = [] if len(h5.target_indices) > 1: print("Warning multiple targets in the compscan") for scan in h5.scans(): a.append(h5.target_indices[0]) target = h5.catalogue.targets[np.median(a).astype( np.int)] # Majority Track compscan_index = h5.compscan_indices[0] #h5.select(targets=target,compscans=h5.compscan_indices[0]) # Majority Track in compscan if not return_raw: # Calculate average target flux over entire band flux_spectrum = h5.catalogue.targets[ h5.target_indices[0]].flux_density(h5.freqs) # include flags average_flux = np.mean( [flux for flux in flux_spectrum if not np.isnan(flux)]) temperature = np.mean(h5.temperature) pressure = np.mean(h5.pressure) humidity = np.mean(h5.humidity) wind_speed = np.mean(h5.wind_speed) wind_direction = np.degrees( np.angle(np.mean(np.exp( 1j * np.radians(h5.wind_direction))))) # Vector Mean sun = katpoint.Target('Sun, special') # Calculate pointing offset # Obtain middle timestamp of compound scan, where all pointing calculations are done middle_time = np.median(h5.timestamps[:], axis=None) # Start with requested (az, el) coordinates, as they apply at the middle time for a moving target requested_azel = target.azel(middle_time) # Correct for refraction, which becomes the requested value at input of pointing model rc = katpoint.RefractionCorrection() requested_azel = [ requested_azel[0], rc.apply(requested_azel[1], temperature, pressure, humidity) ] requested_azel = katpoint.rad2deg(np.array(requested_azel)) gaussian_centre = np.zeros((chunk_size * 2, 2, len(h5.ants))) gaussian_centre_std = np.zeros((chunk_size * 2, 2, len(h5.ants))) gaussian_width = np.zeros((chunk_size * 2, 2, len(h5.ants))) gaussian_width_std = np.zeros((chunk_size * 2, 2, len(h5.ants))) gaussian_height = np.zeros((chunk_size * 2, len(h5.ants))) gaussian_height_std = np.zeros((chunk_size * 2, len(h5.ants))) if debug: #debug_text debug_text = [] line = [] line.append("#AntennaPol") line.append("Target") line.append("Freq(MHz)") #MHz line.append("Centre Az") line.append("Centre El") line.append("Centre Az Std") line.append("Centre El Std") line.append("Centre Az Width") line.append("Centre El Width") line.append("Centre Az Width Std") line.append("Centre El Width Std") line.append("Height") line.append("Height Std") debug_text.append(','.join(line)) pols = ["H", "V"] # Put in logic for Intensity for i, pol in enumerate(pols): gains_p[pol] = [] pos = [] stdv[pol] = [] h5.select(pol=pol, corrprods='cross', ants=h5.antlist, targets=target, compscans=compscan_index) h5.bls_lookup = calprocs.get_bls_lookup(h5.antlist, h5.corr_products) for scan in h5.scans(): if scan[1] != 'track': continue valid_index = activity(h5, state='track') data = h5.vis[valid_index] if data.shape[0] > 0: # need at least one data point #g0 = np.ones(len(h5.ants),np.complex) if use_weights: weights = h5.weights[valid_index].mean(axis=0) else: weights = np.ones(data.shape[1:]).astype(np.float) gains_p[pol].append( calprocs.g_fit(data[:].mean(axis=0), weights, h5.bls_lookup, refant=0)) stdv[pol].append( np.ones( (data.shape[0], data.shape[1], 
                                    len(h5.ants))).sum(axis=0))  # number of data points
            # Get coords in (x(time, ants), y(time, ants)) coords
            pos.append([h5.target_x[valid_index, :].mean(axis=0),
                        h5.target_y[valid_index, :].mean(axis=0)])
        for ant in range(len(h5.ants)):
            for chunk in range(chunks):
                if np.array(pos).shape[0] > 4:  # Make sure there is enough data for a fit
                    freq = slice(chunk * (h5.shape[1] // chunks),
                                 (chunk + 1) * (h5.shape[1] // chunks))
                    rfi = ~rfi_static_flags[freq]
                    fitobj = fit.GaussianFit(np.array(pos)[:, :, ant].mean(axis=0), [1., 1.], 1)
                    x = np.column_stack((np.array(pos)[:, 0, ant], np.array(pos)[:, 1, ant]))
                    y = np.abs(np.array(gains_p[pol])[:, freq, :][:, rfi, ant]).mean(axis=1)
                    y_err = 1. / np.sqrt(np.array(stdv[pol])[:, freq, :][:, rfi, ant].sum(axis=1))
                    gaussian = fitobj.fit(x.T, y, y_err)
                    # Fitted beam center is in (x, y) coordinates, in projection centred on target
                    snr = np.abs(np.r_[gaussian.std / gaussian.std_std])
                    valid_fit = np.all(np.isfinite(np.r_[gaussian.mean, gaussian.std_mean,
                                                         gaussian.std, gaussian.std_std,
                                                         gaussian.height, gaussian.std_height, snr]))
                    theta = np.sqrt((gaussian.mean ** 2).sum())  # this is to see if the co-ord is out of range
                    # The valid fit is needed because I have no way of working out if the gain solution was ok.
                    if not valid_fit or np.any(theta > np.pi):  # the checks to see if the fit is ok
                        gaussian_centre[chunk + i * chunk_size, :, ant] = np.nan
                        gaussian_centre_std[chunk + i * chunk_size, :, ant] = np.nan
                        gaussian_width[chunk + i * chunk_size, :, ant] = np.nan
                        gaussian_width_std[chunk + i * chunk_size, :, ant] = np.nan
                        gaussian_height[chunk + i * chunk_size, ant] = np.nan
                        gaussian_height_std[chunk + i * chunk_size, ant] = np.nan
                    else:
                        # Convert this offset back to spherical (az, el) coordinates
                        beam_center_azel = target.plane_to_sphere(np.radians(gaussian.mean[0]),
                                                                  np.radians(gaussian.mean[1]),
                                                                  middle_time)
                        # Now correct the measured (az, el) for refraction and then apply the old pointing model
                        # to get a "raw" measured (az, el) at the output of the pointing model
                        beam_center_azel = [beam_center_azel[0],
                                            rc.apply(beam_center_azel[1], temperature, pressure, humidity)]
                        beam_center_azel = h5.ants[ant].pointing_model.apply(*beam_center_azel)
                        beam_center_azel = np.degrees(np.array(beam_center_azel))
                        gaussian_centre[chunk + i * chunk_size, :, ant] = beam_center_azel
                        gaussian_centre_std[chunk + i * chunk_size, :, ant] = gaussian.std_mean
                        gaussian_width[chunk + i * chunk_size, :, ant] = gaussian.std
                        gaussian_width_std[chunk + i * chunk_size, :, ant] = gaussian.std_std
                        gaussian_height[chunk + i * chunk_size, ant] = gaussian.height
                        gaussian_height_std[chunk + i * chunk_size, ant] = gaussian.std_height

    if return_raw:
        return np.r_[gaussian_centre, gaussian_centre_std, gaussian_width,
                     gaussian_width_std, gaussian_height, gaussian_height_std]
    else:
        ant_pointing = {}
        pols = ["HH", "VV", 'I']
        pol_ind = {}
        pol_ind['HH'] = np.arange(0.0 * chunk_size, 1.0 * chunk_size, dtype=int)
        pol_ind['VV'] = np.arange(1.0 * chunk_size, 2.0 * chunk_size, dtype=int)
        pol_ind['I'] = np.arange(0.0 * chunk_size, 2.0 * chunk_size, dtype=int)
        for ant in range(len(h5.ants)):
            h_pol = ~np.isnan(gaussian_centre[pol_ind['HH'], :, ant]) & \
                    ~np.isnan(1. / gaussian_centre_std[pol_ind['HH'], :, ant])
            v_pol = ~np.isnan(gaussian_centre[pol_ind['VV'], :, ant]) & \
                    ~np.isnan(1. / gaussian_centre_std[pol_ind['VV'], :, ant])
            # Note this is twice the number of solutions because of the Az & El parts
            valid_solutions = np.count_nonzero(h_pol & v_pol)
            print("%i valid solutions out of %s for %s on %s at %s " %
                  (valid_solutions // 2, chunks, h5.ants[ant].name, target.name,
                   str(katpoint.Timestamp(middle_time))))
            if debug:  # debug_text
                for pol_i, pol in enumerate(["H", "V"]):
                    for chunk in range(chunks * pol_i, chunks * (pol_i + 1)):
                        line = []
                        freq = h5.channel_freqs[slice(chunk * (h5.shape[1] // chunks),
                                                      (chunk + 1) * (h5.shape[1] // chunks))].mean()
                        line.append(h5.ants[ant].name + pol)
                        line.append(target.name)
                        line.append(str(freq / 1e6))  # MHz
                        line.append(str(gaussian_centre[chunk, 0, ant]))
                        line.append(str(gaussian_centre[chunk, 1, ant]))
                        line.append(str(gaussian_centre_std[chunk, 0, ant]))
                        line.append(str(gaussian_centre_std[chunk, 1, ant]))
                        line.append(str(gaussian_width[chunk, 0, ant]))
                        line.append(str(gaussian_width[chunk, 1, ant]))
                        line.append(str(gaussian_width_std[chunk, 0, ant]))
                        line.append(str(gaussian_width_std[chunk, 1, ant]))
                        line.append(str(gaussian_height[chunk, ant]))
                        line.append(str(gaussian_height_std[chunk, ant]))
                        debug_text.append(','.join(line))
            if valid_solutions // 2 > 0:  # a bit overboard
                name = h5.ants[ant].name
                ant_pointing[name] = {}
                ant_pointing[name]["antenna"] = h5.ants[ant].name
                ant_pointing[name]["valid_solutions"] = valid_solutions
                ant_pointing[name]["dataset"] = h5.name.split('/')[-1].split('.')[0]
                ant_pointing[name]["target"] = target.name
                ant_pointing[name]["timestamp_ut"] = str(katpoint.Timestamp(middle_time))
                ant_pointing[name]["data_unit"] = 'Jy' if calibrated else 'counts'
                ant_pointing[name]["frequency"] = h5.freqs.mean()
                ant_pointing[name]["flux"] = average_flux
                ant_pointing[name]["temperature"] = temperature
                ant_pointing[name]["pressure"] = pressure
                ant_pointing[name]["humidity"] = humidity
                ant_pointing[name]["wind_speed"] = wind_speed
                ant_pointing[name]["wind_direction"] = wind_direction
                # work out the sun's angle
                sun_azel = katpoint.rad2deg(np.array(sun.azel(middle_time, antenna=h5.ants[ant])))
                ant_pointing[name]["sun_az"] = sun_azel.tolist()[0]
                ant_pointing[name]["sun_el"] = sun_azel.tolist()[1]
                ant_pointing[name]["timestamp"] = middle_time.astype(int)
                # Work out the target position and the requested position
                # Start with requested (az, el) coordinates, as they apply at the middle time for a moving target
                requested_azel = target.azel(middle_time, antenna=h5.ants[ant])
                # Correct for refraction, which becomes the requested value at input of pointing model
                rc = katpoint.RefractionCorrection()
                requested_azel = [requested_azel[0],
                                  rc.apply(requested_azel[1], temperature, pressure, humidity)]
                requested_azel = katpoint.rad2deg(np.array(requested_azel))
                target_azel = katpoint.rad2deg(np.array(target.azel(middle_time, antenna=h5.ants[ant])))
                ant_pointing[name]["azimuth"] = target_azel.tolist()[0]
                ant_pointing[name]["elevation"] = target_azel.tolist()[1]
                azel_beam = w_average(gaussian_centre[pol_ind["I"], :, ant], axis=0,
                                      weights=1. / gaussian_centre_std[pol_ind["I"], :, ant] ** 2)
                # Make sure the offset is a small angle around 0 degrees
                offset_azel = katpoint.wrap_angle(azel_beam - requested_azel, 360.)
                ant_pointing[name]["delta_azimuth"] = offset_azel.tolist()[0]
                ant_pointing[name]["delta_elevation"] = offset_azel.tolist()[1]
                ant_pointing[name]["delta_elevation_std"] = 0.0  # calc
                ant_pointing[name]["delta_azimuth_std"] = 0.0  # calc
                for pol in pol_ind:
                    ant_pointing[name]["beam_height_%s" % (pol)] = w_average(
                        gaussian_height[pol_ind[pol], ant], axis=0,
                        weights=1. / gaussian_height_std[pol_ind[pol], ant] ** 2)
                    ant_pointing[name]["beam_height_%s_std" % (pol)] = np.sqrt(
                        np.nansum(1. / gaussian_height_std[pol_ind[pol], ant] ** 2))
                    ant_pointing[name]["beam_width_%s" % (pol)] = w_average(
                        gaussian_width[pol_ind[pol], :, ant], axis=0,
                        weights=1. / gaussian_width_std[pol_ind[pol], :, ant] ** 2).mean()
                    ant_pointing[name]["beam_width_%s_std" % (pol)] = np.sqrt(
                        np.nansum(1. / gaussian_width_std[pol_ind[pol], :, ant] ** 2))
                    ant_pointing[name]["baseline_height_%s" % (pol)] = 0.0
                    ant_pointing[name]["baseline_height_%s_std" % (pol)] = 0.0
                    ant_pointing[name]["refined_%s" % (pol)] = 5.0  # I don't know what this means
                    ant_pointing[name]["azimuth_%s" % (pol)] = w_average(
                        gaussian_centre[pol_ind[pol], 0, ant], axis=0,
                        weights=1. / gaussian_centre_std[pol_ind[pol], 0, ant] ** 2)
                    ant_pointing[name]["elevation_%s" % (pol)] = w_average(
                        gaussian_centre[pol_ind[pol], 1, ant], axis=0,
                        weights=1. / gaussian_centre_std[pol_ind[pol], 1, ant] ** 2)
                    ant_pointing[name]["azimuth_%s_std" % (pol)] = np.sqrt(
                        np.nansum(1. / gaussian_centre_std[pol_ind[pol], 0, ant] ** 2))
                    ant_pointing[name]["elevation_%s_std" % (pol)] = np.sqrt(
                        np.nansum(1. / gaussian_centre_std[pol_ind[pol], 1, ant] ** 2))
            else:
                print("No (%i) solutions for %s on %s at %s " %
                      (valid_solutions, h5.ants[ant].name, target.name,
                       str(katpoint.Timestamp(middle_time))))
        if debug:  # debug_text
            debug_text.append('')
            base = "%s_%s" % (h5.name.split('/')[-1].split('.')[0], "interferometric_pointing_DEBUG")
            g = file('%s:Scan%i:%s' % (base, compscan_index, target.name), 'w')
            g.write("\n".join(debug_text))
            g.close()
        return ant_pointing
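# The reduction above leans on a w_average() helper for all the inverse-variance
# weighted means over frequency chunks, but that helper is defined elsewhere in the
# script. Below is a minimal sketch of what such a NaN-tolerant weighted average
# could look like; it is an assumption for illustration, not the original implementation.
import numpy as np


def w_average(arr, axis=None, weights=None):
    """NaN-tolerant weighted mean (illustrative sketch only)."""
    arr = np.asarray(arr, dtype=float)
    weights = np.ones_like(arr) if weights is None else np.asarray(weights, dtype=float)
    # Zero the weight of any non-finite sample so it drops out of the average
    good = np.isfinite(arr) & np.isfinite(weights)
    weights = np.where(good, weights, 0.0)
    data = np.where(good, arr, 0.0)
    with np.errstate(invalid='ignore', divide='ignore'):
        return (data * weights).sum(axis=axis) / weights.sum(axis=axis)

# With weights = 1 / sigma**2, as used above, this reduces to the usual
# inverse-variance combination of the per-chunk beam centres.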
def update(fig):
    """Fit new pointing model and update plots."""
    # Perform early redraw to improve interactivity of clicks (which typically change state of target dots)
    # Target state: 0 = flagged, 1 = unflagged, 2 = highlighted
    target_state = keep * ((target_index == fig.highlighted_target) + 1)
    # Specify colours of flagged, unflagged and highlighted dots, respectively, as RGBA tuples
    dot_colors = np.choose(target_state,
                           np.atleast_3d(np.vstack([(1, 1, 1, 1), (0, 0, 1, 1), (1, 0, 0, 1)]))).T
    for ax in fig.axes[:7]:
        ax.dots.set_facecolors(dot_colors)
    fig.canvas.draw()
    # Fit new pointing model and update results
    params, sigma_params = new_model.fit(az[keep], el[keep], measured_delta_az[keep],
                                         measured_delta_el[keep], std_delta_az[keep],
                                         std_delta_el[keep], enabled_params)
    new.update(new_model)
    # Update rest of figure
    fig.texts[3].set_text("$\chi^2$ = %.1f" % new.chi2)
    fig.texts[4].set_text("all sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(target_index == fig.highlighted_target)
    fig.texts[5].set_text("target sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(keep)
    fig.texts[-1].set_text(unique_targets[fig.highlighted_target])
    # Update model parameter strings
    for p, param in enumerate(display_params):
        fig.texts[2 * p + 6].set_text(new_model.param_str(param + 1, '%.3e')
                                      if enabled_params[param] else '')
        # HACK to convert sigmas to arcminutes, but not for P9 and P12 (which are scale factors)
        # This functionality should really reside inside the PointingModel class
        std_param = rad2deg(sigma_params[param]) * 60. if param not in [8, 11] else sigma_params[param]
        std_param_str = ("%.2f'" % std_param) if param not in [8, 11] else ("%.0e" % std_param)
        fig.texts[2 * p + 7].set_text(std_param_str if enabled_params[param] and opts.use_stats else '')
        # Turn parameter string bold if it changed significantly from old value
        if np.abs(params[param] - old_model.params[param]) > 3.0 * sigma_params[param]:
            fig.texts[2 * p + 6].set_weight('bold')
            fig.texts[2 * p + 7].set_weight('bold')
        else:
            fig.texts[2 * p + 6].set_weight('normal')
            fig.texts[2 * p + 7].set_weight('normal')
    daz_az, del_az, daz_el, del_el, quiver, before, after = fig.axes[:7]
    # Update quiver plot
    quiver_scale = 0.1 * fig.quiver_scale_slider.val * np.pi / 6 / deg2rad(old.robust_sky_rms / 60.)
    quiver.quiv.set_segments(quiver_segments(new.residual_az, new.residual_el, quiver_scale))
    quiver.quiv.set_color(np.choose(keep, np.atleast_3d(np.vstack([(0.3, 0.3, 0.3, 0.2),
                                                                   (0.3, 0.3, 0.3, 1)]))).T)
    # Update residual plots
    daz_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_xel) * 60.])
    del_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_el) * 60.])
    daz_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_xel) * 60.])
    del_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_el) * 60.])
    after.dots.set_offsets(np.c_[np.arctan2(new.residual_el, new.residual_xel), new.abs_sky_error])
    resid_lim = 1.2 * max(new.abs_sky_error.max(), old.abs_sky_error.max())
    daz_az.set_ylim(-resid_lim, resid_lim)
    del_az.set_ylim(-resid_lim, resid_lim)
    daz_el.set_ylim(-resid_lim, resid_lim)
    del_el.set_ylim(-resid_lim, resid_lim)
    before.set_ylim(0, resid_lim)
    after.set_ylim(0, resid_lim)
    # Redraw the figure
    fig.canvas.draw()
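# The update() callback refits the pointing model on every click via new_model.fit(...).
# Outside the GUI the same fit can be driven directly from katpoint; the sketch below
# uses synthetic offset arrays and an assumed enabled-parameter list purely for
# illustration, not values from this script.
import numpy as np
import katpoint

az = np.radians(np.linspace(0., 350., 36))                 # pointing azimuths (rad)
el = np.radians(np.full_like(az, 45.))                     # pointing elevations (rad)
measured_delta_az = np.radians(np.full_like(az, 0.01))     # fake offsets
measured_delta_el = np.radians(np.full_like(az, -0.005))   # fake offsets
std_delta = np.radians(np.full_like(az, 0.001))            # fake 1-sigma errors

model = katpoint.PointingModel()
params, sigma_params = model.fit(az, el, measured_delta_az, measured_delta_el,
                                 std_delta, std_delta,
                                 enabled_params=[1, 3, 4, 5, 6, 7])
print(model.description)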
    err_power = np.dot(resid, resid)
    print "Iteration %d: residual = %.2f, beam height = %.3f, width = %s, inner region = %d/%d" % \
          (n, (prev_err_power - err_power) / err_power, beam.height,
           scape.beam_baseline.width_string(beam.width), np.sum(~outer), len(outer))
    if (err_power == 0.0) or (prev_err_power - err_power) / err_power < 1e-5:
        break
    prev_err_power = err_power + 0.0

##### PLOT RESULTS #####

start_time = timestamps.min()
t = timestamps - start_time
plt.plot(t, power, 'b')
plt.plot(t, baseline(target_coords), 'g')
plt.plot(t, beam(target_coords) + baseline(target_coords), 'g')
plt.xlabel('Time in seconds since %s' % katpoint.Timestamp(start_time).local())
plt.ylabel('Power')
plt.title('Quick beam and baseline fit')
print "Beam offset is (%f, %f) deg" % (katpoint.rad2deg(beam.center[0]),
                                       katpoint.rad2deg(beam.center[1]))


def set_delay(time_after_now, delay=None):
    t = katpoint.Timestamp() + time_after_now
    if delay is None:
        delay = cable2 - cable1 + tgt.geometric_delay(ant2, t, ant1)[0]
    print delay
    roach.req.poco_delay('0x', int(t.secs) * 1000, '%.9f' % (delay * 1000))
    roach.req.poco_delay('0y', int(t.secs) * 1000, '%.9f' % (delay * 1000))
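# set_delay() above converts a katpoint geometric delay into the millisecond
# conventions expected by the ROACH request. The stand-alone calculation looks
# roughly like this; the antenna and target descriptions below are hypothetical
# placeholders, not values from this script.
import katpoint

ant1 = katpoint.Antenna('A1, -30:43:17.3, 21:24:38.5, 1038.0, 12.0, 18.4 -8.7 0.0')
ant2 = katpoint.Antenna('A2, -30:43:17.3, 21:24:38.5, 1038.0, 12.0, 86.2 25.5 0.0')
tgt = katpoint.Target('PKS 1934-63, radec, 19:39:25.03, -63:42:45.7')

t = katpoint.Timestamp()  # "now"
delay, delay_rate = tgt.geometric_delay(ant2, t, ant1)
print('delay = %.3f ns, delay rate = %.3f ps/s' % (delay * 1e9, delay_rate * 1e12))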
    def next(self, event):
        azimuth, elevation = [], []
        # Open the output file and write the RFI-contaminated channel information
        if self.ind >= len(datasets):
            print "No more data to be reduced, hold on a second while we write the extracted info to the file."
            fout = file(opts.outfilebase + '.csv', 'w')
            fout.write("FILENAME, FREQUENCY[MHz], TIMESTAMPS, ABS_TIME, AZIMUTH, ELEVATION, PEAK POWER [dB]\n")
            fout.writelines([('%s, %0.4f, %0.4f, %s, %0.2f, %0.2f,%0.2f \n') % tuple(p)
                             for p in output_data if p])
            fout.close()
            # Open the output file that lists only the RFI-contaminated channels
            fout2 = file(opts.outfilebase2 + '.txt', 'w')
            fout2.write("CONTAMINATED FREQUENCY CHANNELS [MHz]\n")
            fout2.write("======================================\n")
            fout2.writelines([('%0.4f\n') % p for p in set(output_chan) if p])
            fout2.close()
            # Time vs frequency figure for the selected channels
            fig_new = plt.figure()
            new_ax1 = fig_new.add_subplot(311, axisbg='#FFFFCC')
            new_ax1.plot(output_ts, output_chan, 'k+', lw=3)
            new_ax1.set_xlabel("Time [s]")
            new_ax1.set_ylabel("Frequency [MHz]")
            new_ax2 = fig_new.add_subplot(312, axisbg="#FFFFCC")
            new_ax2.plot(output_az, output_chan, 'g+', lw=3)
            new_ax2.set_xlabel("Azimuth [Deg]")
            new_ax2.set_ylabel("Frequency [MHz]")
            new_ax3 = fig_new.add_subplot(313, axisbg='#FFFFCC')
            new_ax3.plot(output_az, output_el, 'r+', lw=3)
            new_ax3.set_xlabel("Azimuth [Deg]")
            new_ax3.set_ylabel("Elevation [Deg]")
            print "We are done writing to the file; look at the frequency vs time plot to see the RFI mapping as a function of time"
            print "The RFI-contaminated channels are:", set(output_chan)
            plt.show()
            sys.exit()
        self.filename = datasets[self.ind]
        try:
            logger.info("Loading dataset %s , File size is %fMB, This is File number %s" %
                        (os.path.basename(self.filename), os.path.getsize(self.filename) / 1e6, self.ind))
            current_dataset = DataSet(self.filename, baseline=opts.baseline)
            out_filename = os.path.basename(self.filename)
            start_freq_channel = int(opts.freq_keep.split(',')[0])
            end_freq_channel = int(opts.freq_keep.split(',')[1])
            current_dataset = current_dataset.select(freqkeep=range(start_freq_channel, end_freq_channel + 1))
            current_dataset = current_dataset.select(labelkeep='scan', copy=False)
            if len(current_dataset.compscans) == 0 or len(current_dataset.scans) == 0:
                logger.warning('No scans found in file, skipping data set')
            # Try to extract antenna pointing per timestamp for each compound scan
            for cscan in current_dataset.compscans:
                target = cscan.target.name
                az = np.hstack([scan.pointing['az'] for scan in cscan.scans])
                el = np.hstack([scan.pointing['el'] for scan in cscan.scans])
                ts = np.hstack([scan.timestamps for scan in cscan.scans])
                azimuth.extend(katpoint.rad2deg(az)), elevation.extend(katpoint.rad2deg(el))
            azimuth, elevation = np.array(azimuth), np.array(elevation)
            ts, f, amp = extract_xyz_data(current_dataset, 'abs_time', 'freq', 'amp')
            power, freq = amp.data, f.data
            t = np.hstack(ts.data)
            base_freq = freq[0]
            p = np.hstack(power)
            T, F = np.meshgrid(t, base_freq)
            A, F = np.meshgrid(azimuth, base_freq)
            E, F = np.meshgrid(elevation, base_freq)
            AA = A.ravel()
            EE = E.ravel()
            TT = T.ravel()
            FF = F.ravel()
            PP = p.ravel()

            def onselect_next(eclick, erelease):
                global output_data, output_chan, output_ts
                xmin = min(eclick.xdata, erelease.xdata)
                xmax = max(eclick.xdata, erelease.xdata)
                ymin = min(eclick.ydata, erelease.ydata)
                ymax = max(eclick.ydata, erelease.ydata)
                ind = (FF >= xmin) & (FF <= xmax) & (PP >= ymin) & (PP <= ymax)
                selected_freq = FF[ind]
                selected_amp = 10.0 * np.log10(PP[ind])
                selected_ts = TT[ind]
                selected_az = AA[ind]
                selected_el = EE[ind]
                print "SUCCESSFUL, CLICK AND DRAG TO SELECT THE NEXT RFI CHANNELS OR NEXT TO LOAD NEW DATASET"
                # Sort the selection with increasing frequency (and time within each channel)
                indices = np.lexsort(keys=(selected_ts, selected_freq))
                for index in indices:
                    output_data.append([out_filename, selected_freq[index], selected_ts[index],
                                        katpoint.Timestamp(selected_ts[index]).local(),
                                        selected_az[index], selected_el[index], selected_amp[index]])
                for point in output_data:
                    output_chan.append(point[1])
                    output_ts.append(point[2])
                    output_az.append(point[4])
                    output_el.append(point[5])

            def toggle_selector_next(event):
                print ' Key pressed.'
                if event.key in ['Q', 'q'] and toggle_selector.RS.active:
                    print ' RectangleSelector deactivated.'
                    toggle_selector_next.RS.set_active(False)
                if event.key in ['A', 'a'] and not toggle_selector_next.RS.active:
                    print ' RectangleSelector activated.'
                    toggle_selector_next.RS.set_active(True)

            # New figure for the current data set
            current_ax.clear()
            plt.subplots_adjust(bottom=0.2)
            current_ax.plot(FF, PP, '+')
            current_ax.set_title("CLICK AND DRAG TO SELECT RFI CHAN")
            current_ax.set_xlabel('Frequency Channels [MHz]', bbox=dict(facecolor='red'))
            current_ax.set_ylabel('Power [Count]', bbox=dict(facecolor='red'))
            plt.draw()
            print "NEW DATA SET SUCCESSFULLY LOADED, CLICK AND DRAG TO SELECT THE RFI CONTAMINATED CHANNELS OR NEXT TO CONTINUE"
            toggle_selector_next.RS = RectangleSelector(current_ax, onselect_next, drawtype='box')
            plt.connect('key_press_event', toggle_selector_next)
        except ValueError:
            print os.path.basename(self.filename), "DATA CORRUPTED, PLEASE CLICK NEXT TO LOAD ANOTHER DATASET"
        self.ind += 1
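# The channel flagging above hinges on matplotlib's RectangleSelector. Stripped of
# the data handling, the wiring looks roughly like the toy below (a random scatter
# instead of the script's spectra; assumes any reasonably recent matplotlib).
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector

x, y = np.random.rand(2, 500)
fig, ax = plt.subplots()
ax.plot(x, y, '+')


def onselect(eclick, erelease):
    # The press/release events give the data coordinates of the dragged box
    xmin, xmax = sorted((eclick.xdata, erelease.xdata))
    ymin, ymax = sorted((eclick.ydata, erelease.ydata))
    inside = (x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax)
    print('%d points selected' % inside.sum())


selector = RectangleSelector(ax, onselect)  # keep a reference so the selector stays active
plt.show()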
# """)
f.writelines(np.sort(outlines))
f.close()

# Test the catalogue
ant = katpoint.Antenna('KAT7, -30:43:16.71, 21:24:35.86, 1055, 12.0')
cat = katpoint.Catalogue(open('bae_optical_pointing_sources.csv'),
                         add_specials=False, antenna=ant)
timestamp = katpoint.Timestamp()
ra, dec = np.array([t.radec(timestamp) for t in cat]).transpose()
constellation = [t.aliases[0].partition(' ')[2][:3] if t.aliases else 'SOL' for t in cat]
ra, dec = katpoint.rad2deg(ra), katpoint.rad2deg(dec)
az, el = np.hstack([targ.azel([katpoint.Timestamp(timestamp + t)
                               for t in range(0, 24 * 3600, 30 * 60)])
                    for targ in cat])
az, el = katpoint.rad2deg(az), katpoint.rad2deg(el)

plt.figure(1)
plt.clf()
for n, c in enumerate(constellation):
    plt.text(ra[n], dec[n], c, ha='left', va='center', size='xx-small')
plt.axis([0, 360, -90, 90])
plt.xlabel('Right Ascension (degrees)')
plt.ylabel('Declination (degrees)')
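# A quick follow-on visibility check on the same catalogue (assuming the CSV file and
# antenna string used in the test above are available): filter() keeps only the
# targets above a given elevation at the chosen time.
import katpoint

ant = katpoint.Antenna('KAT7, -30:43:16.71, 21:24:35.86, 1055, 12.0')
cat = katpoint.Catalogue(open('bae_optical_pointing_sources.csv'),
                         add_specials=False, antenna=ant)
now = katpoint.Timestamp()
visible = cat.filter(el_limit_deg=15, timestamp=now, antenna=ant)
print('%d of %d sources are above 15 degrees elevation' %
      (len(visible.targets), len(cat.targets)))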
def main():
    # Parse command-line options and arguments
    parser = optparse.OptionParser(
        usage='%prog [options] <data file> [<data file> ...]',
        description='Display a horizon mask from a set of data files.')
    parser.add_option('-a', '--baseline', dest='baseline', type="string", metavar='BASELINE',
                      default='A1A1',
                      help="Baseline to load (e.g. 'A1A1' for antenna 1), "
                           "default is first single-dish baseline in file")
    parser.add_option('-o', '--output', dest='output', type="string", metavar='OUTPUTFILE',
                      default=None, help="Write out intermediate h5 file")
    parser.add_option('-s', '--split', dest='split', action="store_true", metavar='SPLIT',
                      default=False, help="Whether to split each horizon plot in half")
    parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT',
                      default=45.0, help="Degrees to rotate azimuth window by.")
    parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0,
                      help="The temperature limit to make the cut-off for the mask. This is calculated "
                           "as the T_sys at zenith plus the atmospheric noise contribution at 10 degrees "
                           "elevation as per R.T. 199.")
    parser.add_option("-n", "--nd-models",
                      help="Name of optional directory containing noise diode model files")
    (opts, args) = parser.parse_args()

    # Check arguments
    if len(args) < 1:
        raise RuntimeError('Please specify the data file to reduce')

    # Load data set
    gridtemp = []
    for filename in args:
        print 'Loading baseline', opts.baseline, 'from data file', filename
        d = scape.DataSet(filename, baseline=opts.baseline, nd_models=opts.nd_models)
        if len(d.freqs) > 1:
            # Only keep main scans (discard slew and cal scans)
            d = d.select(freqkeep=range(200, 800))
            d = remove_rfi(d, width=7, sigma=5)
            d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0)
            d = d.select(flagkeep='~nd_on')
            d = d.select(labelkeep='scan', copy=False)
            # Average all frequency channels into one band
            d.average()
        # Extract azimuth and elevation angle from (azel) target associated with scan, in degrees
        azimuth, elevation, temp = [], [], []
        for s in d.scans:
            azimuth.extend(rad2deg(s.pointing['az']))
            elevation.extend(rad2deg(s.pointing['el']))
            temp.extend(tuple(np.sqrt(s.pol('HH')[:, 0] * s.pol('VV')[:, 0])))
        assert len(azimuth) == len(elevation) == len(temp), "sizes don't match"
        data = (azimuth, elevation, temp)
        print "Gridding the data"
        print "data shape = ", np.shape(data[0] + (np.array(azimuth)[np.array(azimuth) < -89] + 360.0).tolist())
        print np.shape(data[1] + np.array(elevation)[np.array(azimuth) < -89].tolist())
        print np.shape(data[2] + np.array(temp)[np.array(azimuth) < -89].tolist())
        # The points just below -90 deg azimuth are duplicated at +360 deg to ensure the points
        # are well spaced; this offset is not a problem as it is just for sorting out a boundary condition
        gridtemp.append(mlab.griddata(
            data[0] + (np.array(azimuth)[np.array(azimuth) < -89] + 360.0).tolist(),
            data[1] + np.array(elevation)[np.array(azimuth) < -89].tolist(),
            data[2] + np.array(temp)[np.array(azimuth) < -89].tolist(),
            np.arange(-90, 271, 1), np.arange(4, 16, 0.1)))
        print "Completed Gridding the data"
    print "Making the mask"
    mask = gridtemp[0] >= opts.temp_limit
    for grid in gridtemp:
        mask = mask * (grid >= opts.temp_limit)
    maskr = np.zeros((len(np.arange(-90, 271, 1)), 2))
    for i, az in enumerate(np.arange(-90, 271, 1)):
        print 'at az %f' % (az,)
        maskr[i] = az, np.max(elevation)
        for j, el in enumerate(np.arange(4, 16, 0.1)):
            if ~mask.data[j, i] and ~mask.mask[j, i]:
                maskr[i] = az, el
                break
    np.savetxt('horizon_mask_%s.dat' % (opts.baseline), maskr[1:, :])
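# main() writes the mask as a two-column text file (azimuth and elevation in degrees),
# so checking a pointing against it only needs numpy interpolation. The filename below
# just follows the pattern used above with the default 'A1A1' baseline; the azimuth
# grid is assumed to run from -90 to 270 degrees as written by main().
import numpy as np

mask = np.loadtxt('horizon_mask_A1A1.dat')  # columns: azimuth [deg], elevation [deg]
az_mask, el_mask = mask[:, 0], mask[:, 1]


def above_horizon(az_deg, el_deg):
    """True if the pointing clears the masked horizon (linear interpolation in azimuth)."""
    return el_deg > np.interp(az_deg, az_mask, el_mask)


print(above_horizon(123.0, 20.0))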
if len(observation_sources.filter(el_limit_deg=opts.horizon)) == 0:
    user_logger.warning("No targets are currently visible - please re-run the script later")
else:
    # Start capture session, which creates HDF5 file
    with start_session(kat, **vars(opts)) as session:
        session.standard_setup(**vars(opts))
        session.capture_start()
        # Iterate through source list, picking the next one that is up
        for target in observation_sources.iterfilter(el_limit_deg=opts.horizon):
            user_logger.info(target)
            [ra, dec] = target.radec()
            (tra, tdec) = (katpoint.rad2deg(float(ra)), katpoint.rad2deg(float(dec)))
            session.label('track')
            user_logger.info("Initiating %g-second track on target (%.2f, %.2f)" %
                             (opts.track_duration, tra, tdec))
            session.set_target(target)  # Set the target
            session.track(target, duration=opts.track_duration, announce=False)
            # Set the target & mode = point
            for dra in [-1, 0, 1]:
                for ddec in [-1, 0, 1]:
                    [ra, dec] = target.radec()