def track_line(linepoints, duration):
    target = construct_azel_target(deg2rad(linepoints[0][0]), deg2rad(linepoints[0][1]))
    mode = obs_mode('Line', linePoints=linepoints, observationDuration=duration)
    return trackq.track(target, duration, mode=mode)
def update(self, timestamp):
    elapsed_time = timestamp - self._last_update if self._last_update else 0.0
    self._last_update = timestamp
    if self.mode not in ('POINT', 'SCAN', 'STOW'):
        return
    az, el = self.pos_actual_scan_azim, self.pos_actual_scan_elev
    target = construct_azel_target(deg2rad(az), deg2rad(90.0)) \
        if self.mode == 'STOW' else self._target
    if not target:
        return
    requested_az, requested_el = target.azel(timestamp, self.ant)
    requested_az = rad2deg(wrap_angle(requested_az))
    requested_el = rad2deg(requested_el)
    delta_az = wrap_angle(requested_az - az, period=360.)
    delta_el = requested_el - el
    # Truncate velocities to slew rate limits and update position
    max_delta_az = self.max_slew_azim_dps * elapsed_time
    max_delta_el = self.max_slew_elev_dps * elapsed_time
    az += min(max(delta_az, -max_delta_az), max_delta_az)
    el += min(max(delta_el, -max_delta_el), max_delta_el)
    # Truncate coordinates to antenna limits
    az = min(max(az, self.real_az_min_deg), self.real_az_max_deg)
    el = min(max(el, self.real_el_min_deg), self.real_el_max_deg)
    # Check angular separation to determine lock
    dish = construct_azel_target(deg2rad(az), deg2rad(el))
    error = rad2deg(target.separation(dish, timestamp, self.ant))
    self.lock = error < self.lock_threshold
    # Update position sensors
    self.pos_request_scan_azim = requested_az
    self.pos_request_scan_elev = requested_el
    self.pos_actual_scan_azim = az
    self.pos_actual_scan_elev = el
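# The core of the update above is a rate-limited step towards the requested position.
# A minimal standalone sketch of that clamping logic (plain NumPy, made-up slew limits;
# not the simulator's actual class):
import numpy as np

def slew_limited_step(actual_deg, requested_deg, max_rate_dps, elapsed_s):
    """Move one axis towards the requested angle, limited by the slew rate.

    The real update also wraps azimuth and clips to the antenna's mechanical limits.
    """
    delta = requested_deg - actual_deg
    max_step = max_rate_dps * elapsed_s
    return actual_deg + np.clip(delta, -max_step, max_step)

# Example: a 2 deg/s azimuth drive updated every 0.5 s closes a 10 degree error
az = 10.0
for _ in range(5):
    az = slew_limited_step(az, 20.0, max_rate_dps=2.0, elapsed_s=0.5)
print(az)  # 15.0 after five 1-degree steps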
def setUp(self):
    az_range = katpoint.deg2rad(np.arange(-185.0, 275.0, 5.0))
    el_range = katpoint.deg2rad(np.arange(0.0, 86.0, 1.0))
    mesh_az, mesh_el = np.meshgrid(az_range, el_range)
    self.az = mesh_az.ravel()
    self.el = mesh_el.ravel()
    # Generate random parameter values with this spread
    self.param_stdev = katpoint.deg2rad(20. / 60.)
    self.num_params = len(katpoint.PointingModel())
def test_offset(self):
    """Test target offset."""
    az, el = self.target1.azel(self.ts, self.ant1)
    offset = dict(projection_type='SIN')
    target3 = katpoint.construct_azel_target(az - katpoint.deg2rad(1.0),
                                             el - katpoint.deg2rad(1.0))
    x, y = target3.sphere_to_plane(az, el, self.ts, self.ant1, **offset)
    offset['x'] = x
    offset['y'] = y
    extra_delay = self.delays.extra_delay
    delay0, phase0 = self.delays.corrections(target3, self.ts, offset=offset)
    delay1, phase1 = self.delays.corrections(target3, self.ts, self.ts + 1.0, offset)
    # Conspire to return to special target1
    self.assertEqual(delay0['A2h'], extra_delay, 'Delay for ant2h should be zero')
    self.assertEqual(delay0['A2v'], extra_delay, 'Delay for ant2v should be zero')
    self.assertEqual(delay1['A2h'][0], extra_delay, 'Delay for ant2h should be zero')
    self.assertEqual(delay1['A2v'][0], extra_delay, 'Delay for ant2v should be zero')
    self.assertEqual(delay1['A2h'][1], 0.0, 'Delay rate for ant2h should be zero')
    self.assertEqual(delay1['A2v'][1], 0.0, 'Delay rate for ant2v should be zero')
    # Now try (ra, dec) coordinate system
    ra, dec = self.target1.radec(self.ts, self.ant1)
    offset = dict(projection_type='ARC', coord_system='radec')
    target4 = katpoint.construct_radec_target(ra - katpoint.deg2rad(1.0),
                                              dec - katpoint.deg2rad(1.0))
    x, y = target4.sphere_to_plane(ra, dec, self.ts, self.ant1, **offset)
    offset['x'] = x
    offset['y'] = y
    extra_delay = self.delays.extra_delay
    delay0, phase0 = self.delays.corrections(target4, self.ts, offset=offset)
    delay1, phase1 = self.delays.corrections(target4, self.ts, self.ts + 1.0, offset)
    # Conspire to return to special target1
    np.testing.assert_almost_equal(delay0['A2h'], extra_delay, decimal=15)
    np.testing.assert_almost_equal(delay0['A2v'], extra_delay, decimal=15)
    np.testing.assert_almost_equal(delay1['A2h'][0], extra_delay, decimal=15)
    np.testing.assert_almost_equal(delay1['A2v'][0], extra_delay, decimal=15)
    np.testing.assert_almost_equal(delay1['A2h'][1], 0.0, decimal=15)
    np.testing.assert_almost_equal(delay1['A2v'][1], 0.0, decimal=15)
def fit_tipping(T_sys, SpillOver, pol):
    """Fit tipping curve.

    T_sys(el) = T_cmb + T_gal + T_atm * (1 - exp(-tau_0 / sin(el))) + T_spill(el) + T_rx

    We fit the zenith opacity tau_0 and the receiver temperature T_rx.
    """
    T_atm = 1.12 * (273.15 + T_sys.surface_temperature) - 50.0  # ??
    # Create a function to give the spillover at any elevation at the observing frequency
    # Set up full tipping equation y = f(p, x):
    #   function input x = elevation in degrees
    #   parameter vector p = [T_rx, zenith opacity tau_0]
    #   function output y = T_sys in kelvin
    # func = lambda p, x: p[0] + T_cmb + T_gal + T_spill_func(x) + T_atm * (1 - np.exp(-p[1] / np.sin(deg2rad(x))))
    # T_sky = np.average(T_sys.T_sky)  # T_sys.Tsky(x)
    func = lambda p, x: (p[0] + T_sys.Tsky(x) + SpillOver.spill[pol](x) +
                         T_atm * (1 - np.exp(-p[1] / np.sin(deg2rad(x)))))
    # Initialise the fitter with the function and an initial guess of the parameter values
    tip = scape.fitting.NonLinearLeastSquaresFit(func, [70, 0.005])
    tip.fit(T_sys.elevation, T_sys.Tsys[pol])
    logger.info('Fit results for %s polarisation:' % (pol,))
    logger.info('T_ant = %.2f K' % (tip.params[0],))
    logger.info('Zenith opacity tau_0 = %.5f' % (tip.params[1],))
    # Calculate atmospheric noise contribution at 10 degrees elevation for comparison with requirements
    T_atm_10 = T_atm * (1 - np.exp(-tip.params[1] / np.sin(deg2rad(10))))
    logger.info('Atmospheric noise contribution at 10 degrees is: %.2f K' % (T_atm_10,))
    fit_func = [func(tip.params, el) for el in T_sys.elevation]
    return {'params': tip.params, 'fit': fit_func,
            'scatter': (T_sys.Tsys[pol] - fit_func),
            'chisq': chisq_pear(fit_func, T_sys.Tsys[pol])}
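# The same two-parameter tipping fit can be sketched without scape's fitting module.
# This illustration swaps in scipy.optimize.curve_fit and uses placeholder sky and
# spillover terms plus synthetic data; it is not the script's actual code path.
import numpy as np
from scipy.optimize import curve_fit

T_atm = 260.0    # assumed effective atmospheric temperature [K]
T_sky = 5.0      # placeholder for T_cmb + T_gal [K]
T_spill = lambda el_deg: 2.0 + 0.05 * (90.0 - el_deg)   # made-up spillover model [K]

def tipping(el_deg, T_rx, tau_0):
    """T_sys(el) = T_rx + T_sky + T_spill(el) + T_atm * (1 - exp(-tau_0 / sin(el)))."""
    el_rad = np.radians(el_deg)
    return T_rx + T_sky + T_spill(el_deg) + T_atm * (1 - np.exp(-tau_0 / np.sin(el_rad)))

# Synthetic tipping-curve measurements for illustration only
el = np.linspace(15.0, 85.0, 30)
T_meas = tipping(el, 70.0, 0.01) + np.random.normal(0.0, 0.2, el.shape)

params, cov = curve_fit(tipping, el, T_meas, p0=[70.0, 0.005])
T_rx_fit, tau_0_fit = params
print('T_rx = %.2f K, tau_0 = %.5f' % (T_rx_fit, tau_0_fit))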
def pointing_model(antenna, data):
    new_model = katpoint.PointingModel()
    num_params = len(new_model)
    default_enabled = np.array([1, 3, 4, 5, 6, 7, 8]) - 1
    enabled_params = np.tile(False, num_params)
    enabled_params[default_enabled] = True
    enabled_params = enabled_params.tolist()
    # For display purposes, throw out unused parameters P2 and P10
    display_params = list(range(num_params))
    display_params.pop(9)
    display_params.pop(1)
    # Fit new pointing model
    az, el = data['azimuth'], data['elevation']
    measured_delta_az, measured_delta_el = data['delta_azimuth'], data['delta_elevation']
    # Uncertainties are optional
    min_std = deg2rad((np.sqrt(2) * 60. * 1e-12) / 60. / np.sqrt(2))
    std_delta_az = np.clip(data['delta_azimuth_std'], min_std, np.inf) \
        if 'delta_azimuth_std' in data.dtype.fields else np.tile(min_std, len(az))
    std_delta_el = np.clip(data['delta_elevation_std'], min_std, np.inf) \
        if 'delta_elevation_std' in data.dtype.fields else np.tile(min_std, len(el))
    params, sigma_params = new_model.fit(az, el, measured_delta_az, measured_delta_el,
                                         std_delta_az, std_delta_el, enabled_params)
    antenna.pointing_model = new_model
    return antenna
def _calc_azel(cache, name, ant):
    """Calculate virtual (az, el) sensors from actual ones in sensor cache."""
    real_sensor = 'Antennas/%s/%s' % (ant, 'pos_actual_scan_azim'
                                      if name.endswith('az') else 'pos_actual_scan_elev')
    cache[name] = sensor_data = katpoint.deg2rad(cache.get(real_sensor))
    return sensor_data
def update(fig):
    """Fit new pointing model and update plots."""
    # Perform early redraw to improve interactivity of clicks (which typically change state of target dots)
    # Target state: 0 = flagged, 1 = unflagged, 2 = highlighted
    target_state = keep * ((target_index == fig.highlighted_target) + 1)
    # Specify colours of flagged, unflagged and highlighted dots, respectively, as RGBA tuples
    dot_colors = np.choose(target_state, np.atleast_3d(np.vstack([(1, 1, 1, 1), (0, 0, 1, 1), (1, 0, 0, 1)]))).T
    for ax in fig.axes[:7]:
        ax.dots.set_facecolors(dot_colors)
    fig.canvas.draw()
    # Fit new pointing model and update results
    params, sigma_params = new_model.fit(az[keep], el[keep], measured_delta_az[keep],
                                         measured_delta_el[keep], std_delta_az[keep],
                                         std_delta_el[keep], enabled_params)
    new.update(new_model)
    # Update rest of figure
    fig.texts[3].set_text("$\chi^2$ = %.1f" % new.chi2)
    fig.texts[4].set_text("all sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(target_index == fig.highlighted_target)
    fig.texts[5].set_text("target sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(keep)
    fig.texts[-1].set_text(unique_targets[fig.highlighted_target])
    # Update model parameter strings
    for p, param in enumerate(display_params):
        fig.texts[2*p + 6].set_text(param_to_str(new_model, param) if enabled_params[param] else '')
        # HACK to convert sigmas to arcminutes, but not for P9 and P12 (which are scale factors)
        # This functionality should really reside inside the PointingModel class
        std_param = rad2deg(sigma_params[param]) * 60. if param not in [8, 11] else sigma_params[param]
        std_param_str = ("%.2f'" % std_param) if param not in [8, 11] else ("%.0e" % std_param)
        fig.texts[2*p + 7].set_text(std_param_str if enabled_params[param] and opts.use_stats else '')
        # Turn parameter string bold if it changed significantly from old value
        if np.abs(params[param] - old_model.values()[param]) > 3.0 * sigma_params[param]:
            fig.texts[2*p + 6].set_weight('bold')
            fig.texts[2*p + 7].set_weight('bold')
        else:
            fig.texts[2*p + 6].set_weight('normal')
            fig.texts[2*p + 7].set_weight('normal')
    daz_az, del_az, daz_el, del_el, quiver, before, after = fig.axes[:7]
    # Update quiver plot
    quiver_scale = 0.1 * fig.quiver_scale_slider.val * np.pi / 6 / deg2rad(old.robust_sky_rms / 60.)
    quiver.quiv.set_segments(quiver_segments(new.residual_az, new.residual_el, quiver_scale))
    quiver.quiv.set_color(np.choose(keep, np.atleast_3d(np.vstack([(0.3, 0.3, 0.3, 0.2), (0.3, 0.3, 0.3, 1)]))).T)
    # Update residual plots
    daz_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_xel) * 60.])
    del_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_el) * 60.])
    daz_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_xel) * 60.])
    del_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_el) * 60.])
    after.dots.set_offsets(np.c_[np.arctan2(new.residual_el, new.residual_xel), new.abs_sky_error])
    resid_lim = 1.2 * max(new.abs_sky_error.max(), old.abs_sky_error.max())
    daz_az.set_ylim(-resid_lim, resid_lim)
    del_az.set_ylim(-resid_lim, resid_lim)
    daz_el.set_ylim(-resid_lim, resid_lim)
    del_el.set_ylim(-resid_lim, resid_lim)
    before.set_ylim(0, resid_lim)
    after.set_ylim(0, resid_lim)
    # Redraw the figure
    fig.canvas.draw()
def select_and_average(filename, average_time):
    # Read a file into katdal, and average the data to the prescribed averaging time
    # Returns the weather data and timestamps with the correct averaging interval
    data = katdal.open(filename)
    raw_timestamps = data.sensor.timestamps
    raw_wind_speed = data.wind_speed
    raw_temperature = data.temperature
    raw_dumptime = data.dump_period
    # Get azel of each antenna and separation of each antenna
    sun = katpoint.Target('Sun, special', antenna=data.ants[0])
    alltimestamps = data.timestamps[:]
    solar_seps = np.zeros_like(alltimestamps)
    for dumpnum, timestamp in enumerate(alltimestamps):
        azeltarget = katpoint.construct_azel_target(katpoint.deg2rad(data.az[dumpnum, 0]),
                                                    katpoint.deg2rad(data.el[dumpnum, 0]))
        azeltarget.antenna = data.ants[0]
        solar_seps[dumpnum] = katpoint.rad2deg(azeltarget.separation(sun, timestamp))
    # Determine number of dumps to average
    num_average = max(int(np.round(average_time / raw_dumptime)), 1)
    # Array of block indices
    indices = list(range(min(num_average, raw_timestamps.shape[0]),
                         raw_timestamps.shape[0] + 1,
                         min(num_average, raw_timestamps.shape[0])))
    timestamps = np.average(np.array(np.split(raw_timestamps, indices)[:-1]), axis=1)
    wind_speed = np.average(np.array(np.split(raw_wind_speed, indices)[:-1]), axis=1)
    temperature = np.average(np.array(np.split(raw_temperature, indices)[:-1]), axis=1)
    dump_time = raw_dumptime * num_average
    return (timestamps, alltimestamps, wind_speed, temperature, dump_time,
            solar_seps, data.ants[0])
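# The block averaging above splits the raw sensor arrays at equally spaced indices and
# drops the final partial block.  A standalone sketch of the same pattern with synthetic
# data and a made-up averaging factor:
import numpy as np

raw = np.arange(10.0)    # e.g. ten 1-second dumps
num_average = 3          # average every 3 dumps

# Split points fall at 3, 6 and 9; [:-1] discards the trailing partial block
indices = list(range(num_average, len(raw) + 1, num_average))
blocks = np.split(raw, indices)[:-1]
averaged = np.array(blocks).mean(axis=1)
print(averaged)  # [1. 4. 7.]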
def test_pointing_model_load_save(self):
    """Test construction / load / save of pointing model."""
    params = katpoint.deg2rad(np.random.randn(self.num_params + 1))
    pm = katpoint.PointingModel(params[:-1])
    print(repr(pm), pm)
    pm2 = katpoint.PointingModel(params[:-2])
    self.assertEqual(pm2.values()[-1], 0.0, 'Unspecified pointing model params not zeroed')
    pm3 = katpoint.PointingModel(params)
    self.assertEqual(pm3.values()[-1], params[-2], 'Superfluous pointing model params not handled correctly')
    pm4 = katpoint.PointingModel(pm.description)
    self.assertEqual(pm4.description, pm.description, 'Saving pointing model to string and loading it again failed')
    self.assertEqual(pm4, pm, 'Pointing models should be equal')
    self.assertNotEqual(pm2, pm, 'Pointing models should be unequal')
    np.testing.assert_almost_equal(pm4.values(), pm.values(), decimal=6)
def _calc_azel(cache, name, ant):
    """Calculate virtual (az, el) sensors from actual ones in sensor cache."""
    suffix = 'azim' if name.endswith('az') else 'elev'
    real_sensor = f'{ant}_pos_actual_scan_{suffix}'
    cache[name] = sensor_data = katpoint.deg2rad(cache.get(real_sensor))
    return sensor_data
def quiver_scale_callback(event):
    quiver_scale = 0.1 * fig.quiver_scale_slider.val * np.pi / 6 / deg2rad(old.robust_sky_rms / 60.)
    fig.axes[4].quiv.set_segments(quiver_segments(new.residual_az, new.residual_el, quiver_scale))
    fig.canvas.draw()
add_breaks=False, color='b', alpha=0.5) scape.plots_basic.plot_segments(scan_timestamps, bl_old_resid, labels=scan_targets, width=sample_period, color='b') scape.plots_basic.plot_segments(scan_timestamps, bl_new_resid, labels=[], width=sample_period, add_breaks=False, color='r', lw=2) plt.ylim(-0.5 * delay_period, (num_bls - 0.5) * delay_period) plt.yticks(np.arange(num_bls) * delay_period, baseline_names) plt.xlabel('Time (s), since %s' % (katpoint.Timestamp(data.start_time).local(),)) plt.title('Residual delay errors per baseline (blue = old model and red = new model)') plt.figure(4) plt.clf() ax = plt.axes(polar=True) eastnorth_radius = np.sqrt(old_positions[:, 0] ** 2 + old_positions[:, 1] ** 2) eastnorth_angle = np.arctan2(old_positions[:, 0], old_positions[:, 1]) for ant, theta, r in zip(data.ants, eastnorth_angle, eastnorth_radius): ax.text(np.pi/2. - theta, r * 0.9 * np.pi/2. / eastnorth_radius.max(), ant.name, ha='center', va='center').set_bbox(dict(facecolor='b', lw=1, alpha=0.3)) # Quality of delays obtained from source, with 0 worst and 1 best quality = np.hstack([q.mean(axis=0) for q in extract_scan_segments(1.0 - sigma_delay / max_sigma_delay)]) ax.scatter(np.pi/2 - np.array(scan_mid_az), np.pi/2 - np.array(scan_mid_el), 100*quality, 'k', edgecolors=None, linewidths=0, alpha=0.5) for name, az, el in zip(scan_targets, scan_mid_az, scan_mid_el): ax.text(np.pi/2. - az, np.pi/2. - el, name, ha='center', va='top') ax.set_xticks(katpoint.deg2rad(np.arange(0., 360., 90.))) ax.set_xticklabels(['E', 'N', 'W', 'S']) ax.set_ylim(0., np.pi / 2.) ax.set_yticks(katpoint.deg2rad(np.arange(0., 90., 10.))) ax.set_yticklabels([]) plt.title('Antenna positions and source directions')
def quiver_scale_callback(event):
    quiver_scale = 0.1 * fig.quiver_scale_slider.val * np.pi / 6 / deg2rad(old.robust_sky_rms / 60.)
    fig.axes[4].quiv.set_segments(quiver_segments(new.residual_az, new.residual_el, quiver_scale))
    fig.canvas.draw()
current_az = session.ants[ 0].sensor.pos_actual_scan_azim.get_value() current_el = session.ants[ 0].sensor.pos_actual_scan_elev.get_value() if current_az is None: user_logger.warning( "Sensor kat.%s.sensor.pos_actual_scan_azim failed - using default azimuth" % (session.ants[0].name)) current_az = 0. if current_el is None: user_logger.warning( "Sensor kat.%s.sensor.pos_actual_scan_elev failed - using default elevation" % (session.ants[0].name)) current_el = 30. current_pos = katpoint.construct_azel_target( katpoint.deg2rad(current_az), katpoint.deg2rad(current_el)) # Get closest strong source that is up strong_sources = kat.sources.filter(el_limit_deg=[20, 75], flux_limit_Jy=100, flux_freq_MHz=opts.centre_freq) if len(strong_sources) == 0: user_logger.warning( "Empty point source catalogue or no targets currently visible" ) target = strong_sources.targets[np.argmin( [t.separation(current_pos) for t in strong_sources])] user_logger.info( "No target specified, picked the closest strong source") session.label('raster') # session.fire_noise_diode('coupler', on=4, off=4)
if i % linelength == linelength - 1: text.append(tmpstr) tmpstr = "" i = i + 1 tmpstr += '%s, ' % (tar) text.append(tmpstr) # Initialise new pointing model and set default enabled parameters new_model = katpoint.PointingModel() num_params = len(new_model) default_enabled = np.array([1, 3, 4, 5, 6, 7]) - 1 enabled_params = np.tile(False, num_params) enabled_params[default_enabled] = True enabled_params = enabled_params.tolist() # Fit new pointing model az, el = angle_wrap(deg2rad(data['azimuth'])), deg2rad(data['elevation']) measured_delta_az, measured_delta_el = deg2rad(data['delta_azimuth']), deg2rad( data['delta_elevation']) # Uncertainties are optional min_std = deg2rad(min_rms / 60. / np.sqrt(2)) std_delta_az = np.clip(deg2rad(data['delta_azimuth_std']), min_std, np.inf) \ if 'delta_azimuth_std' in data.dtype.fields and opts.use_stats else np.tile(min_std, len(az)) std_delta_el = np.clip(deg2rad(data['delta_elevation_std']), min_std, np.inf) \ if 'delta_elevation_std' in data.dtype.fields and opts.use_stats else np.tile(min_std, len(el)) params, sigma_params = new_model.fit(az[keep], el[keep], measured_delta_az[keep], measured_delta_el[keep], std_delta_az[keep], std_delta_el[keep], enabled_params)
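# The new_model.fit(...) call above expects offsets and uncertainties in radians plus a
# list of booleans selecting which parameters to solve for.  A small synthetic round trip
# using the same katpoint API as these scripts (the enabled set and noise level are arbitrary):
import numpy as np
import katpoint

num_params = len(katpoint.PointingModel())
# "True" model with only P1 (a constant azimuth offset) set to 1 arcminute
true_values = np.zeros(num_params)
true_values[0] = katpoint.deg2rad(1.0 / 60.0)
true_model = katpoint.PointingModel(true_values)

# Random pointing directions and the offsets the true model predicts there
az = katpoint.deg2rad(np.random.uniform(0.0, 360.0, 50))
el = katpoint.deg2rad(np.random.uniform(20.0, 80.0, 50))
measured_delta_az, measured_delta_el = true_model.offset(az, el)

# Fit a fresh model, solving only for P1
new_model = katpoint.PointingModel()
enabled_params = [True] + [False] * (num_params - 1)
std = np.tile(katpoint.deg2rad(0.01 / 60.0), len(az))
params, sigma_params = new_model.fit(az, el, measured_delta_az, measured_delta_el,
                                     std, std, enabled_params)
print(katpoint.rad2deg(new_model.values()[0]) * 60.)  # recovered P1, about 1 arcminute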
    ra, dec = target.apparent_radec(timestamp=timenow)
    targetName = target.name.replace(" ", "")
    print(targetName)
    target.name = targetName + '_O'
    sources.add(target)
if opts.cal == 'fluxN':
    timenow = katpoint.Timestamp()
    sources = katpoint.Catalogue(add_specials=False)
    user_logger.info('Performing flux calibration')
    ra, dec = target.apparent_radec(timestamp=timenow)
    print(target)
    print("ra %f ,dec %f" % (katpoint.rad2deg(ra), katpoint.rad2deg(dec)))
    dec2 = dec + katpoint.deg2rad(1)
    print(dec2, dec)
    decS = dec - katpoint.deg2rad(1)
    targetName = target.name.replace(" ", "")
    print(targetName)
    print("newra %f newdec %f" % (katpoint.rad2deg(ra), katpoint.rad2deg(dec)))
    Ntarget = katpoint.construct_radec_target(ra, dec2)
    Ntarget.antenna = bf_ants
    Ntarget.name = targetName + '_N'
    sources.add(Ntarget)
if opts.cal == 'fluxS':
    timenow = katpoint.Timestamp()
    sources = katpoint.Catalogue(add_specials=False)
# Interpret first non-comment line as header
fields = data[0].tolist()
# By default, all fields are assumed to contain floats
formats = np.tile(np.float, len(fields))
# The string_fields are assumed to contain strings - use data's string type, as it is of sufficient length
formats[[fields.index(name) for name in string_fields if name in fields]] = data.dtype
# Convert to heterogeneous record array
data = np.rec.fromarrays(data[1:].transpose(), dtype=zip(fields, formats))
# Load antenna description string from first line of file and construct antenna object from it
antenna = katpoint.Antenna(file(filename).readline().strip().partition('=')[2])
# Use the pointing model contained in antenna object as the old model (if not overridden by file)
# If the antenna has no model specified, a default null model will be used
if old_model is None:
    old_model = antenna.pointing_model
# Obtain desired fields and convert to radians
az, el = wrap_angle(deg2rad(data['azimuth'])), deg2rad(data['elevation'])
measured_delta_az, measured_delta_el = deg2rad(data['delta_azimuth']), deg2rad(data['delta_elevation'])
# Uncertainties are optional
min_std = deg2rad(opts.min_rms / 60. / np.sqrt(2))
std_delta_az = np.clip(deg2rad(data['delta_azimuth_std']), min_std, np.inf) \
    if 'delta_azimuth_std' in data.dtype.fields and opts.use_stats else np.tile(min_std, len(az))
std_delta_el = np.clip(deg2rad(data['delta_elevation_std']), min_std, np.inf) \
    if 'delta_elevation_std' in data.dtype.fields and opts.use_stats else np.tile(min_std, len(el))
targets = data['target']
keep = data['keep'].astype(np.bool) if 'keep' in data.dtype.fields else np.tile(True, len(targets))
# Initialise new pointing model and set default enabled parameters
new_model = katpoint.PointingModel()
num_params = len(new_model)
default_enabled = np.nonzero(old_model.values())[0]
# If the old model is empty / null, select the most basic set of parameters for starters
phase = turns - np.floor(turns)
plt.figure(1)
plt.clf()
plt.imshow(phase.reshape(x_grid.shape), origin='lower',
           extent=[x_range[0], x_range[-1], y_range[0], y_range[-1]])
# In terms of (ha, dec)
# One second resolution on hour angle - picks up fast fringes that way
ha_range = np.linspace(-12., 12., 86401)
dec_range = np.linspace(-90., katpoint.rad2deg(lat) + 90., 101)
ha_grid, dec_grid = np.meshgrid(ha_range, dec_range)
hh, dd = ha_grid.flatten(), dec_grid.flatten()
source_vec = katpoint.hadec_to_enu(hh / 12. * np.pi, katpoint.deg2rad(dd), lat)
geom_delay = -np.dot(baseline_m, source_vec) / katpoint.lightspeed
geom_delay = geom_delay.reshape(ha_grid.shape)
turns = geom_delay * rf_freq
phase = turns - np.floor(turns)
fringe_rate = np.diff(geom_delay, axis=1) / (np.diff(ha_range) * 3600.) * rf_freq
plt.figure(2)
plt.clf()
plt.imshow(phase, origin='lower', aspect='auto',
           extent=[ha_range[0], ha_range[-1], dec_range[0], dec_range[-1]])
plt.xlabel('Hour angle (hours)')
plt.ylabel('Declination (degrees)')
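# Fringe phase and fringe rate follow directly from the geometric delay: phase turns are
# delay times RF frequency, and the fringe rate is the time derivative of the delay scaled
# by the same frequency.  A tiny illustration with a made-up delay track:
import numpy as np

rf_freq = 1.8e9                          # assumed observing frequency [Hz]
t = np.linspace(0.0, 60.0, 61)           # one minute of time samples [s]
geom_delay = 1e-6 * np.cos(2 * np.pi * t / 86400.0)   # made-up geometric delay [s]

turns = geom_delay * rf_freq                                # delay expressed in phase turns
phase = turns - np.floor(turns)                             # wrapped phase, in fractions of a turn
fringe_rate = np.diff(geom_delay) / np.diff(t) * rf_freq    # turns per second
print(phase[:3], fringe_rate[:3])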
def calc_pointing_offsets(session, beams, target, middle_time, temperature, pressure, humidity): """Calculate pointing offsets per receptor based on primary beam fits. Parameters ---------- session : :class:`katcorelib.observe.CaptureSession` object The active capture session beams : dict mapping receptor name to list of :class:`BeamPatternFit` Fitted primary beams, per receptor and per frequency chunk target : :class:`katpoint.Target` object The target on which offset pointings were done middle_time : float Unix timestamp at the middle of sequence of offset pointings, used to find the mean location of a moving target (and reference for weather) temperature, pressure, humidity : float Atmospheric conditions at middle time, used for refraction correction Returns ------- pointing_offsets : dict mapping receptor name to offset data (10 floats) Pointing offsets per receptor in degrees, stored as a sequence of - requested (az, el) after refraction (input to the pointing model), - full (az, el) offset, including contributions of existing pointing model, any existing adjustment and newly fitted adjustment (useful for fitting new pointing models as it is independent), - full (az, el) adjustment on top of existing pointing model, replacing any existing adjustment (useful for reference pointing), - relative (az, el) adjustment on top of existing pointing model and adjustment (useful for verifying reference pointing), and - rough uncertainty (standard deviation) of (az, el) adjustment. """ pointing_offsets = {} # Iterate over receptors for ant in sorted(session.observers): beams_freq = beams.get(ant.name, []) beams_freq = [b for b in beams_freq if b is not None and b.is_valid] if not beams_freq: user_logger.debug("%s had no valid primary beam fitted", ant.name) continue offsets_freq = np.array([b.center for b in beams_freq]) offsets_freq_std = np.array([b.std_center for b in beams_freq]) weights_freq = 1. / offsets_freq_std**2 # Do weighted average of offsets over frequency chunks results = np.average(offsets_freq, axis=0, weights=weights_freq, returned=True) pointing_offset = results[0] pointing_offset_std = np.sqrt(1. 
/ results[1]) user_logger.debug("%s x=%+7.2f'+-%.2f\" y=%+7.2f'+-%.2f\"", ant.name, pointing_offset[0] * 60, pointing_offset_std[0] * 3600, pointing_offset[1] * 60, pointing_offset_std[1] * 3600) # Get existing pointing adjustment receptor = getattr(session.kat, ant.name) az_adjust = receptor.sensor.pos_adjust_pointm_azim.get_value() el_adjust = receptor.sensor.pos_adjust_pointm_elev.get_value() existing_adjustment = deg2rad(np.array((az_adjust, el_adjust))) # Start with requested (az, el) coordinates, as they apply # at the middle time for a moving target requested_azel = target.azel(timestamp=middle_time, antenna=ant) # Correct for refraction, which becomes the requested value # at input of pointing model rc = RefractionCorrection() def refract(az, el): # noqa: E306, E301 """Apply refraction correction as at the middle of scan.""" return [az, rc.apply(el, temperature, pressure, humidity)] refracted_azel = np.array(refract(*requested_azel)) # More stages that apply existing pointing model and/or adjustment pointed_azel = np.array(ant.pointing_model.apply(*refracted_azel)) adjusted_azel = pointed_azel + existing_adjustment # Convert fitted offset back to spherical (az, el) coordinates pointing_offset = deg2rad(np.array(pointing_offset)) beam_center_azel = target.plane_to_sphere(*pointing_offset, timestamp=middle_time, antenna=ant) # Now correct the measured (az, el) for refraction and then apply the # existing pointing model and adjustment to get a "raw" measured # (az, el) at the output of the pointing model stage beam_center_azel = refract(*beam_center_azel) beam_center_azel = ant.pointing_model.apply(*beam_center_azel) beam_center_azel = np.array(beam_center_azel) + existing_adjustment # Make sure the offset is a small angle around 0 degrees full_offset_azel = wrap_angle(beam_center_azel - refracted_azel) full_adjust_azel = wrap_angle(beam_center_azel - pointed_azel) relative_adjust_azel = wrap_angle(beam_center_azel - adjusted_azel) # Cheap 'n' cheerful way to convert cross-el uncertainty to azim form offset_azel_std = pointing_offset_std / \ np.array([np.cos(refracted_azel[1]), 1.]) # We store all variants of the pointing offset since we have it all # at our fingertips here point_data = np.r_[rad2deg(refracted_azel), rad2deg(full_offset_azel), rad2deg(full_adjust_azel), rad2deg(relative_adjust_azel), offset_azel_std] pointing_offsets[ant.name] = point_data return pointing_offsets
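# The per-receptor offset above is an inverse-variance weighted mean over frequency chunks;
# np.average(..., returned=True) also returns the sum of weights, from which the combined
# uncertainty follows.  A compact sketch with made-up per-chunk offsets:
import numpy as np

# (x, y) beam offsets per frequency chunk in degrees, with their standard deviations
offsets_freq = np.array([[0.010, -0.020],
                         [0.012, -0.018],
                         [0.011, -0.022]])
offsets_freq_std = np.array([[0.002, 0.002],
                             [0.001, 0.003],
                             [0.004, 0.002]])

weights_freq = 1.0 / offsets_freq_std ** 2
pointing_offset, sum_of_weights = np.average(offsets_freq, axis=0,
                                             weights=weights_freq, returned=True)
pointing_offset_std = np.sqrt(1.0 / sum_of_weights)
print(pointing_offset, pointing_offset_std)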
if len(args) < 1 or not args[0].endswith('.csv'):
    raise RuntimeError('Correct file not passed to program. File should be a csv file')
data = None
for filename in args:
    if data is None:
        data = read_offsetfile(filename)
    else:
        data = np.r_[data, read_offsetfile(filename)]
offsetdata = data

# print new_model.description
az, el = angle_wrap(deg2rad(offsetdata['azimuth'])), deg2rad(offsetdata['elevation'])
measured_delta_az, measured_delta_el = deg2rad(offsetdata['delta_azimuth']), deg2rad(offsetdata['delta_elevation'])


def referencemetrics(measured_delta_az, measured_delta_el):
    """Determine the sky RMS from the pointing model."""
    text = []
    measured_delta_xel = measured_delta_az * np.cos(el)  # scale due to sky shape
    abs_sky_error = np.ma.array(data=measured_delta_xel, mask=False)
    for target in set(offsetdata['target']):
        keep = np.ones((len(offsetdata)), dtype=np.bool)
        for key, targetv in enumerate(offsetdata['target']):
else: # Get current position of first antenna in the list (assume the rest are the same or close) if kat.dry_run: current_az, current_el = session._fake_ants[0][2:] else: current_az = session.ants[0].sensor.pos_actual_scan_azim.get_value() current_el = session.ants[0].sensor.pos_actual_scan_elev.get_value() if current_az is None: user_logger.warning("Sensor kat.%s.sensor.pos_actual_scan_azim failed - using default azimuth" % (session.ants[0].name)) current_az = 0. if current_el is None: user_logger.warning("Sensor kat.%s.sensor.pos_actual_scan_elev failed - using default elevation" % (session.ants[0].name)) current_el = 30. current_pos = katpoint.construct_azel_target(katpoint.deg2rad(current_az), katpoint.deg2rad(current_el)) # Get closest strong source that is up strong_sources = kat.sources.filter(el_limit_deg=[15, 75], flux_limit_Jy=100, flux_freq_MHz=opts.centre_freq) target = strong_sources.targets[np.argmin([t.separation(current_pos) for t in strong_sources])] user_logger.info("No target specified, picked the closest strong source") session.label('raster') session.fire_noise_diode('coupler', 4, 4) session.raster_scan(target, num_scans=3, scan_duration=15, scan_extent=5.0, scan_spacing=0.5) if not kat.dry_run: # Wait until desired HDF5 file appears in the archive (this could take quite a while...) if not session.output_file: raise RuntimeError('Could not obtain name of HDF5 file that was recorded') user_logger.info("Waiting for HDF5 file '%s' to appear in archive" % (session.output_file,)) h5file = session.get_archived_product(download_dir=os.path.abspath(os.path.curdir)) if not os.path.isfile(h5file):
                        avg_axis=1, start_channel=100, stop_channel=400, include_ts=True)
##### FIT BEAM AND BASELINE #####
# Query KAT antenna for antenna object
antenna = katpoint.Antenna(first_ant.sensor.observer.get_value())
# Expected beamwidth in radians (beamwidth factor x lambda / D)
expected_width = antenna.beamwidth * katpoint.lightspeed / (opts.centre_freq * 1e6) / antenna.diameter
# Linearly interpolate pointing coordinates to correlator data timestamps
interp = scape.fitting.PiecewisePolynomial1DFit(max_degree=1)
interp.fit(az[0], az[1])
az = katpoint.deg2rad(interp(timestamps))
interp.fit(el[0], el[1])
el = katpoint.deg2rad(interp(timestamps))
# Calculate target coordinates (projected az-el coordinates relative to target object)
target_coords = np.vstack(target.sphere_to_plane(az, el, timestamps, antenna))
# Do quick beam + baseline fitting, where both are fitted in 2-D target coord space
# This makes no assumptions about the structure of the scans - they are just viewed as a collection of samples
baseline = scape.fitting.Polynomial2DFit((1, 3))
prev_err_power = np.inf
# Initially, all data is considered to be in the "outer" region and therefore forms part of the baseline
outer = np.tile(True, len(power))
print("Fitting quick beam and baseline of degree (1, 3) to target '%s':" % (target.name,))
# Alternate between baseline and beam fitting for a few iterations
for n in range(10):
def analyse_point_source_scans(filename, h5file, opts): # Default output file names are based on input file name dataset_name = os.path.splitext(os.path.basename(filename))[0] if opts.outfilebase is None: opts.outfilebase = dataset_name + '_point_source_scans' kwargs = {} #Force centre freqency if ku-band option is set if opts.ku_band: kwargs['centre_freq'] = 12.5005e9 # Produce canonical version of baseline string (remove duplicate antennas) baseline_ants = opts.baseline.split(',') if len(baseline_ants) == 2 and baseline_ants[0] == baseline_ants[1]: opts.baseline = baseline_ants[0] # Load data set if opts.baseline not in [ant.name for ant in h5file.ants]: raise RuntimeError('Cannot find antenna %s in dataset' % opts.baseline) # dataset = scape.DataSet(h5file, baseline=opts.baseline, nd_models=opts.nd_models, # time_offset=opts.time_offset, **kwargs) dataset = scape.DataSet(filename, baseline=opts.baseline, nd_models=opts.nd_models, time_offset=opts.time_offset, **kwargs) # Select frequency channels and setup defaults if not specified num_channels = len(dataset.channel_select) if opts.freq_chans is None: # Default is drop first and last 25% of the bandpass start_chan = num_channels // 4 end_chan = start_chan * 3 else: start_chan = int(opts.freq_chans.split(',')[0]) end_chan = int(opts.freq_chans.split(',')[1]) chan_select = list(range(start_chan, end_chan + 1)) # Check if a channel mask is specified and apply if opts.channel_mask: mask_file = open(opts.channel_mask, mode='rb') chan_select = ~(pickle.load(mask_file)) mask_file.close() if len(chan_select) != num_channels: raise ValueError( 'Number of channels in provided mask does not match number of channels in data' ) chan_select[:start_chan] = False chan_select[end_chan:] = False dataset = dataset.select(freqkeep=chan_select) # Check scan count if len(dataset.compscans) == 0 or len(dataset.scans) == 0: raise RuntimeError('No scans found in file, skipping data set') scan_dataset = dataset.select(labelkeep='scan', copy=False) if len(scan_dataset.compscans) == 0 or len(scan_dataset.scans) == 0: raise RuntimeError( 'No scans left after standard reduction, skipping data set (no scans labelled "scan", perhaps?)' ) # Override pointing model if it is specified (useful if it is not in data file, like on early KAT-7) if opts.pointing_model: if opts.pointing_model.split('/')[-2] == 'mkat': if opts.ku_band: band = 'ku' else: band = 'l' pt_file = os.path.join(opts.pointing_model, '%s.%s.pm.csv' % (opts.baseline, band)) else: pt_file = os.path.join(opts.pointing_model, '%s.pm.csv' % (opts.baseline)) if not os.path.isfile(pt_file): raise RuntimeError('Cannot find file %s' % (pt_file)) pm = file(pt_file).readline().strip() dataset.antenna.pointing_model = katpoint.PointingModel(pm) # Remove any noise diode models if the ku band option is set and flag for spikes if opts.ku_band: dataset.nd_h_model = None dataset.nd_v_model = None for i in range(len(dataset.scans)): dataset.scans[i].data = scape.stats.remove_spikes( dataset.scans[i].data, axis=1, spike_width=3, outlier_sigma=5.) 
# Initialise the output data cache (None indicates the compscan has not been processed yet) reduced_data = [{} for n in range(len(scan_dataset.compscans))] # Go one past the end of compscan list to write the output data out to CSV file for current_compscan in range(len(scan_dataset.compscans) + 1): # make things play nice opts.batch = True try: the_compscan = scan_dataset.compscans[current_compscan] except: the_compscan = None fig = plt.figure(1, figsize=(8, 8)) plt.clf() if opts.plot_spectrum: plt.subplot(311) plt.subplot(312) plt.subplot(313) else: plt.subplot(211) plt.subplot(212) plt.subplots_adjust(bottom=0.2, hspace=0.25) plt.figtext(0.05, 0.05, '', va='bottom', ha='left') plt.figtext(0.05, 0.945, '', va='bottom', ha='left') # Start off the processing on the first compound scan logger = logging.root fig.current_compscan = 0 reduce_and_plot(dataset, fig.current_compscan, reduced_data, opts, fig, logger=logger) # Initialise the output data cache (None indicates the compscan has not been processed yet) reduced_data = [{} for n in range(len(scan_dataset.compscans))] # Go one past the end of compscan list to write the output data out to CSV file for current_compscan in range(len(scan_dataset.compscans) + 1): # make things play nice opts.batch = True try: the_compscan = scan_dataset.compscans[current_compscan] except: the_compscan = None logger = logging.root output = local_reduce_and_plot(dataset, current_compscan, reduced_data, opts, logger=logger) offsetdata = output[1] from katpoint import deg2rad def angle_wrap(angle, period=2.0 * np.pi): """wrap angle into the interval -*period* / 2 ... *period* / 2.""" return (angle + 0.5 * period) % period - 0.5 * period az, el = angle_wrap(deg2rad(offsetdata['azimuth'])), deg2rad( offsetdata['elevation']) model_delta_az, model_delta_el = ant.pointing_model.offset(az, el) measured_delta_az = offsetdata[ 'delta_azimuth'] - model_delta_az # pointing model correction measured_delta_el = offsetdata[ 'delta_elevation'] - model_delta_el # pointing model correction """determine new residuals from current pointing model""" residual_az = measured_delta_az - model_delta_az residual_el = measured_delta_el - model_delta_el residual_xel = residual_az * np.cos(el) # Initialise new pointing model and set default enabled parameters keep = np.ones((len(offsetdata)), dtype=np.bool) min_rms = np.sqrt(2) * 60. * 1e-12 use_stats = True new_model = katpoint.PointingModel() num_params = len(new_model) default_enabled = np.array([1, 3, 4, 5, 6, 7]) - 1 enabled_params = np.tile(False, num_params) enabled_params[default_enabled] = True enabled_params = enabled_params.tolist() # Fit new pointing model az, el = angle_wrap(deg2rad(offsetdata['azimuth'])), deg2rad( offsetdata['elevation']) measured_delta_az, measured_delta_el = deg2rad( offsetdata['delta_azimuth']), deg2rad(offsetdata['delta_elevation']) # Uncertainties are optional min_std = deg2rad(min_rms / 60. 
/ np.sqrt(2)) std_delta_az = np.clip(deg2rad(offsetdata['delta_azimuth_std']), min_std, np.inf) \ if 'delta_azimuth_std' in offsetdata.dtype.fields and use_stats else np.tile(min_std, len(az)) std_delta_el = np.clip(deg2rad(offsetdata['delta_elevation_std']), min_std, np.inf) \ if 'delta_elevation_std' in offsetdata.dtype.fields and use_stats else np.tile(min_std, len(el)) params, sigma_params = new_model.fit(az[keep], el[keep], measured_delta_az[keep], measured_delta_el[keep], std_delta_az[keep], std_delta_el[keep], enabled_params) """Determine new residuals from new fit""" newmodel_delta_az, newmodel_delta_el = new_model.offset(az, el) residual_az = measured_delta_az - newmodel_delta_az residual_el = measured_delta_el - newmodel_delta_el residual_xel = residual_az * np.cos(el) # Show actual scans h5file.select(scans='scan') fig1 = plt.figure(2, figsize=(8, 8)) plt.scatter(h5file.ra, h5file.dec, s=np.mean(np.abs(h5file.vis[:, 2200:2400, 1]), axis=1)) plt.title('Raster scan over target') plt.ylabel('Dec [deg]') plt.xlabel('Ra [deg]') # Try to fit beam for c in h5file.compscans(): if not dataset is None: dataset = dataset.select(flagkeep='~nd_on') dataset.average() dataset.fit_beams_and_baselines() # Generate output report with PdfPages(opts.outfilebase + '_' + opts.baseline + '.pdf') as pdf: out = reduced_data[0] offset_az, offset_el = "%.1f" % ( 60. * out['delta_azimuth'], ), "%.1f" % (60. * out['delta_elevation'], ) beam_width, beam_height = "%.1f" % ( 60. * out['beam_width_I'], ), "%.2f" % (out['beam_height_I'], ) baseline_height = "%.1f" % (out['baseline_height_I'], ) pagetext = "\nCheck Point Source Scan" pagetext += "\n\nDescription: %s\nName: %s\nExperiment ID: %s" % ( h5file.description, h5file.name, h5file.experiment_id) pagetext = pagetext + "\n" pagetext += "\n\nTest Setup:" pagetext += "\nRaster Scan across bright source" pagetext += "\n\nAntenna %(antenna)s" % out pagetext += "\n------------" pagetext += ("\nTarget = '%(target)s', azel=(%(azimuth).1f, %(elevation).1f) deg, " % out) +\ ("offset=(%s, %s) arcmin" % (offset_az, offset_el)) pagetext += ("\nBeam height = %s %s") % (beam_height, out['data_unit']) pagetext += ("\nBeamwidth = %s' (expected %.1f')") % ( beam_width, 60. * out['beam_expected_width_I']) pagetext += ("\nHH gain = %.3f Jy/%s") % ( out['flux'] / out['beam_height_HH'], out['data_unit']) pagetext += ("\nVV gain = %.3f Jy/%s") % ( out['flux'] / out['beam_height_VV'], out['data_unit']) pagetext += ("\nBaseline height = %s %s") % (baseline_height, out['data_unit']) pagetext = pagetext + "\n" pagetext += ("\nCurrent model AzEl=(%.3f, %.3f) deg" % (model_delta_az[0], model_delta_el[0])) pagetext += ("\nMeasured coordinates using rough fit") pagetext += ("\nMeasured AzEl=(%.3f, %.3f) deg" % (measured_delta_az[0], measured_delta_el[0])) pagetext = pagetext + "\n" pagetext += ("\nDetermine residuals from current pointing model") residual_az = measured_delta_az - model_delta_az residual_el = measured_delta_el - model_delta_el pagetext += ("\nResidual AzEl=(%.3f, %.3f) deg" % (residual_az[0], residual_el[0])) if dataset.compscans[0].beam is not None: if not dataset.compscans[0].beam.is_valid: pagetext += ("\nPossible bad fit!") if (residual_az[0] < 1.) 
and (residual_el[0] < 1.): pagetext += ("\nResiduals within L-band beam") else: pagetext += ("\nMaximum Residual, %.2f, larger than L-band beam" % (max(residual_az[0], residual_el[0]))) pagetext = pagetext + "\n" pagetext += ("\nFitted parameters \n%s" % str(params[:5])) plt.figure(None, figsize=(16, 8)) plt.axes(frame_on=False) plt.xticks([]) plt.yticks([]) plt.title("AR1 Report %s" % opts.outfilebase, fontsize=14, fontweight="bold") plt.text(0, 0, pagetext, fontsize=12) pdf.savefig() plt.close() pdf.savefig(fig) pdf.savefig(fig1) d = pdf.infodict() import datetime d['Title'] = h5file.description d['Author'] = 'AR1' d['Subject'] = 'AR1 check point source scan' d['CreationDate'] = datetime.datetime(2015, 8, 13) d['ModDate'] = datetime.datetime.today()
(src['Name'], src['Type'], accepted_types)) continue if use_atca and src['Name'] not in atca_cat: print("%s skipped: not an ATCA calibrator" % (src['Name'], )) continue names = '[TI80] ' + src['Name'] if len(src['_3C']) > 0: names += ' | 3C ' + src['_3C'] if src['_3C'].endswith('.0'): names += ' | 3C ' + src['_3C'][:-2] if len(src['PKS']) > 0: names += ' | PKS ' + src['PKS'] if len(src['OName']) > 0: names += ' | ' + src['OName'] ra, dec = atca_cat[src['Name']].radec() if use_atca else \ (katpoint.deg2rad(src['_RAJ2000']), katpoint.deg2rad(src['_DEJ2000'])) tags_ra_dec = katpoint.construct_radec_target( ra, dec).add_tags('J2000 ' + src['Type']).description # Extract polarisation data for the current source from pol table pol_data = pol_table[pol_table['Name'] == src['Name']] pol_freqs_MHz = katpoint.lightspeed / (0.01 * pol_data['lambda']) / 1e6 pol_percent = pol_data['Pol'] # Remove duplicate frequencies and fit linear interpolator to data as function of frequency pol_freq, pol_perc = [], [] for freq in np.unique(pol_freqs_MHz): freqfind = (pol_freqs_MHz == freq) pol_freq.append(freq) pol_perc.append(pol_percent[freqfind].mean()) pol_interp = PiecewisePolynomial1DFit(max_degree=1).fit(pol_freq, pol_perc) # Look up source name in 1Jy catalogue and extract its flux density model flux_target = flux_cat['1Jy ' + src['Name']]
tdec, )) session.set_target(target) # Set the target session.track(target, duration=opts.track_duration, announce=False) # Set the target & mode = point for dra in [-1, 0, 1]: for ddec in [-1, 0, 1]: [ra, dec] = target.radec() (tra, tdec) = (katpoint.rad2deg(float(ra)), katpoint.rad2deg(float(dec))) # (ra,dec) = (tra+0.25*dra, tdec+0.25*ddec) (ra, dec) = (tra + 0.5 * dra, tdec + 0.5 * ddec) # (ra,dec) = (tra+1*dra, tdec+1*ddec) newtarget = katpoint.construct_radec_target( katpoint.deg2rad(ra), katpoint.deg2rad(dec)) session.label('track') user_logger.info( "Initiating %g-second track on target (%.2f, %.2f)" % ( opts.track_duration, ra, dec, )) session.set_target(newtarget) # Set the target session.track( newtarget, duration=opts.track_duration, announce=False) # Set the target & mode = point # -fin-
tsys=Tsys, tsys_lim=opts.tsys_lim, eff=e, eff_lim=[opts.eff_min, opts.eff_max], units=opts.units, condition_select=opts.condition_select, pol=opts.polarisation) # Check if we have flagged all the data if np.sum(good) == 0: print('Pol: %s, All data flagged according to selection criteria.' % opts.polarisation) continue # Obtain desired elevations in radians az, el = angle_wrap(katpoint.deg2rad(data['azimuth'])), katpoint.deg2rad( data['elevation']) # Get a fit of an atmospheric absorption model if units are in "K", otherwise use weather data to estimate # opacity for each data point if opts.units == "K": g_0, tau = fit_atmospheric_absorption(gain[good], el[good]) else: tau = np.array([]) for opacity_info in data: tau = np.append(tau, (calc_atmospheric_opacity( opacity_info['temperature'], opacity_info['humidity'] / 100, antenna.observer.elevation / 1000, opacity_info['frequency'] / 1000.0))) g_0 = None
# Load tables in one shot (don't verify, as the VizieR VOTables contain a deprecated DEFINITIONS element) table = Table.read('kuehr1Jy.vot') flux_table = Table.read('kuehr1Jy_flux.vot') src_strings = [] plot_freqs = [flux_table['Freq'].min(), flux_table['Freq'].max()] test_log_freq = np.linspace(np.log10(plot_freqs[0]), np.log10(plot_freqs[1]), 200) plot_rows = 8 plots_per_fig = plot_rows * plot_rows # Iterate through sources for src in table: names = '1Jy ' + src['_1Jy'] if len(src['_3C']) > 0: names += ' | *' + src['_3C'] ra, dec = katpoint.deg2rad(src['_RAJ2000']), katpoint.deg2rad(src['_DEJ2000']) tags_ra_dec = katpoint.construct_radec_target(ra, dec).add_tags('J2000').description # Extract flux data for the current source from flux table flux = flux_table[flux_table['_1Jy'] == src['_1Jy']] # Determine widest possible frequency range where flux is defined (ignore internal gaps in this range) # For better or worse, extend range to at least KAT7 frequency band (also handles empty frequency lists) flux_freqs = flux['Freq'].tolist() + [800.0, 2400.0] min_freq, max_freq = min(flux_freqs), max(flux_freqs) log_freq, log_flux = np.log10(flux['Freq']), np.log10(flux['S']) if src['Fct'] == 'LIN': flux_str = katpoint.FluxDensityModel(min_freq, max_freq, [src['A'], src['B']]).description elif src['Fct'] == 'EXP': flux_str = katpoint.FluxDensityModel(min_freq, max_freq, [src['A'], src['B'], 0.0, 0.0, src['C'], src['D']]).description else: # No flux data found for source - skip it (only two sources, 1334-127 and 2342+82, are discarded)
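# The FluxDensityModel descriptions built above encode log-log polynomial coefficients and
# a validity range in MHz.  A quick sketch of constructing and evaluating one with arbitrary
# coefficients (flux_density is assumed to behave as in the katpoint version used here):
import katpoint

# Made-up source: log10(S [Jy]) = 2.0 - 0.7 * log10(freq [MHz]), valid from 800 to 2400 MHz
model = katpoint.FluxDensityModel(800.0, 2400.0, [2.0, -0.7])
print(model.description)
print(model.flux_density(1400.0))   # flux density in Jy at 1400 MHz
print(model.flux_density(100.0))    # outside the validity range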
def setUp(self):
    self.rc = katpoint.RefractionCorrection()
    self.el = katpoint.deg2rad(np.arange(0.0, 90.1, 0.1))
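# Elsewhere in these scripts the correction is applied as rc.apply(el, temperature, pressure,
# humidity); the sketch below does a round trip through the correction with arbitrary weather
# values, assuming reverse() inverts apply() as in the katpoint versions used here.
import numpy as np
import katpoint

rc = katpoint.RefractionCorrection()
el = katpoint.deg2rad(np.arange(5.0, 90.0, 5.0))
# Arbitrary surface weather: 20 degC, 1010 hPa, 40% relative humidity
refracted_el = rc.apply(el, 20.0, 1010.0, 40.0)
recovered_el = rc.reverse(refracted_el, 20.0, 1010.0, 40.0)
print(katpoint.rad2deg(refracted_el - el) * 3600.)   # refraction correction in arcseconds
print(np.max(np.abs(recovered_el - el)))             # round-trip error, close to zero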
fields = data[0].tolist() # By default, all fields are assumed to contain floats formats = np.tile(np.float, len(fields)) # The string_fields are assumed to contain strings - use data's string type, as it is of sufficient length formats[[fields.index(name) for name in string_fields if name in fields]] = data.dtype # Convert to heterogeneous record array data = np.rec.fromarrays(data[1:].transpose(), dtype=zip(fields, formats)) # Load antenna description string from first line of file and construct antenna object from it antenna = katpoint.Antenna(file(filename).readline().strip().partition('=')[2]) # Use the pointing model contained in antenna object as the old model (if not overridden by file) # If the antenna has no model specified, a default null model will be used if old_model is None: old_model = antenna.pointing_model # Obtain desired fields and convert to radians az, el = angle_wrap(deg2rad(data['azimuth'])), deg2rad(data['elevation']) measured_delta_az, measured_delta_el = deg2rad(data['delta_azimuth']), deg2rad( data['delta_elevation']) # Uncertainties are optional min_std = deg2rad(opts.min_rms / 60. / np.sqrt(2)) std_delta_az = np.clip(deg2rad(data['delta_azimuth_std']), min_std, np.inf) \ if 'delta_azimuth_std' in data.dtype.fields and opts.use_stats else np.tile(min_std, len(az)) std_delta_el = np.clip(deg2rad(data['delta_elevation_std']), min_std, np.inf) \ if 'delta_elevation_std' in data.dtype.fields and opts.use_stats else np.tile(min_std, len(el)) targets = data['target'] keep = data['keep'].astype( np.bool) if 'keep' in data.dtype.fields else np.tile(True, len(targets)) # Initialise new pointing model and set default enabled parameters new_model = katpoint.PointingModel() num_params = new_model.num_params
if len(args) < 1 or not args[0].endswith('.csv'): raise RuntimeError( 'Correct File not passed to program. File should be csv file') # read in data data = None for filename in args: if data is None: data, ant = read_offsetfile(filename) else: tmp_offsets, tmp_ant = read_offsetfile(filename) data = np.r_[data, tmp_offsets] if not ant == tmp_ant: raise RuntimeError('The antenna has changed') # fix units and wraps data['azimuth'], data['elevation'] = wrap_angle(deg2rad( data['azimuth'])), deg2rad(data['elevation']) data['delta_azimuth'], data['delta_elevation'] = deg2rad( data['delta_azimuth']), deg2rad(data['delta_elevation']) data['delta_azimuth_std'], data['delta_elevation_std'] = deg2rad( data['delta_azimuth_std']), deg2rad(data['delta_elevation_std']) if opts.refit_pointing_model: ant = pointing_model( ant, data[(data['beam_height_I'] < np.float(opts.power_sample_limit))]) print(ant.pointing_model) output_data = None for offsetdata in chunk_data(data): #New loop to provide the data in steps of test offet scans text, output_data_tmp = referencemetrics(ant, offsetdata, np.float(opts.num_samples_limit),
def update(fig): """Fit new pointing model and update plots.""" # Perform early redraw to improve interactivity of clicks (which typically change state of target dots) # Target state: 0 = flagged, 1 = unflagged, 2 = highlighted target_state = keep * ((target_index == fig.highlighted_target) + 1) # Specify colours of flagged, unflagged and highlighted dots, respectively, as RGBA tuples dot_colors = np.choose( target_state, np.atleast_3d(np.vstack([(1, 1, 1, 1), (0, 0, 1, 1), (1, 0, 0, 1)]))).T for ax in fig.axes[:7]: ax.dots.set_facecolors(dot_colors) fig.canvas.draw() # Fit new pointing model and update results params, sigma_params = new_model.fit(az[keep], el[keep], measured_delta_az[keep], measured_delta_el[keep], std_delta_az[keep], std_delta_el[keep], enabled_params) new.update(new_model) # Update rest of figure fig.texts[3].set_text("$\chi^2$ = %.1f" % new.chi2) fig.texts[4].set_text("all sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms)) new.metrics(target_index == fig.highlighted_target) fig.texts[5].set_text("target sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms)) new.metrics(keep) fig.texts[-1].set_text(unique_targets[fig.highlighted_target]) # Update model parameter strings for p, param in enumerate(display_params): fig.texts[2 * p + 6].set_text( new_model.param_str(param + 1, '%.3e') if enabled_params[param] else '') # HACK to convert sigmas to arcminutes, but not for P9 and P12 (which are scale factors) # This functionality should really reside inside the PointingModel class std_param = rad2deg(sigma_params[param]) * 60. if param not in [ 8, 11 ] else sigma_params[param] std_param_str = ("%.2f'" % std_param) if param not in [8, 11] else ("%.0e" % std_param) fig.texts[2 * p + 7].set_text( std_param_str if enabled_params[param] and opts.use_stats else '') # Turn parameter string bold if it changed significantly from old value if np.abs(params[param] - old_model.params[param]) > 3.0 * sigma_params[param]: fig.texts[2 * p + 6].set_weight('bold') fig.texts[2 * p + 7].set_weight('bold') else: fig.texts[2 * p + 6].set_weight('normal') fig.texts[2 * p + 7].set_weight('normal') daz_az, del_az, daz_el, del_el, quiver, before, after = fig.axes[:7] # Update quiver plot quiver_scale = 0.1 * fig.quiver_scale_slider.val * np.pi / 6 / deg2rad( old.robust_sky_rms / 60.) quiver.quiv.set_segments( quiver_segments(new.residual_az, new.residual_el, quiver_scale)) quiver.quiv.set_color( np.choose( keep, np.atleast_3d(np.vstack([(0.3, 0.3, 0.3, 0.2), (0.3, 0.3, 0.3, 1)]))).T) # Update residual plots daz_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_xel) * 60.]) del_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_el) * 60.]) daz_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_xel) * 60.]) del_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_el) * 60.]) after.dots.set_offsets(np.c_[np.arctan2(new.residual_el, new.residual_xel), new.abs_sky_error]) resid_lim = 1.2 * max(new.abs_sky_error.max(), old.abs_sky_error.max()) daz_az.set_ylim(-resid_lim, resid_lim) del_az.set_ylim(-resid_lim, resid_lim) daz_el.set_ylim(-resid_lim, resid_lim) del_el.set_ylim(-resid_lim, resid_lim) before.set_ylim(0, resid_lim) after.set_ylim(0, resid_lim) # Redraw the figure fig.canvas.draw()
scape.plots_basic.plot_segments(scan_timestamps, bl_old_resid, labels=scan_targets, width=sample_period, color='b') scape.plots_basic.plot_segments(scan_timestamps, bl_new_resid, labels=[], width=sample_period, add_breaks=False, color='r', lw=2) plt.ylim(-0.5 * delay_period, (num_bls - 0.5) * delay_period) plt.yticks(np.arange(num_bls) * delay_period, baseline_names) plt.xlabel('Time (s), since %s' % (katpoint.Timestamp(data.start_time).local(),)) plt.title('Residual delay errors per baseline (blue = old model and red = new model)') plt.figure(4) plt.clf() ax = plt.axes(polar=True) eastnorth_radius = np.sqrt(old_positions[:, 0] ** 2 + old_positions[:, 1] ** 2) eastnorth_angle = np.arctan2(old_positions[:, 0], old_positions[:, 1]) for ant, theta, r in zip(data.ants, eastnorth_angle, eastnorth_radius): ax.text(np.pi/2. - theta, r * 0.9 * np.pi/2. / eastnorth_radius.max(), ant.name, ha='center', va='center').set_bbox(dict(facecolor='b', lw=1, alpha=0.3)) # Quality of delays obtained from source, with 0 worst and 1 best quality = np.hstack([q.mean(axis=0) for q in extract_scan_segments(1.0 - sigma_delay / max_sigma_delay)]) ax.scatter(np.pi/2 - np.array(scan_mid_az), np.pi/2 - np.array(scan_mid_el), 100*quality, 'k', edgecolors=None, linewidths=0, alpha=0.5) for name, az, el in zip(scan_targets, scan_mid_az, scan_mid_el): ax.text(np.pi/2. - az, np.pi/2. - el, name, ha='center', va='top') ax.set_xticks(katpoint.deg2rad(np.arange(0., 360., 90.))) ax.set_xticklabels(['E', 'N', 'W', 'S']) ax.set_ylim(0., np.pi / 2.) ax.set_yticks(katpoint.deg2rad(np.arange(0., 90., 10.))) ax.set_yticklabels([]) plt.title('Antenna positions and source directions') plt.show()
period = float(cycle_length)
freq = 1.0 / period
print("kat.ptuse_1.req.ptuse_cal_freq (" + data_product_id + ", " + beam_id + ", " + str(freq) + ")")
reply = kat.ptuse_1.req.ptuse_cal_freq(data_product_id, beam_id, freq)
print("kat.ptuse_1.req.ptuse_cal_freq returned " + str(reply))
# Temporary hack to make sure that AP accepts the upcoming track request
time.sleep(2)
timenow = katpoint.Timestamp()
ra, dec = target.apparent_radec(timestamp=timenow)
print(target)
print("ra %f ,dec %f" % (katpoint.rad2deg(ra), katpoint.rad2deg(dec)))
dec2 = dec + katpoint.deg2rad(1)
print(dec2, dec)
print("newra %f newdec %f" % (katpoint.rad2deg(ra), katpoint.rad2deg(dec)))
Ntarget = katpoint.construct_radec_target(ra, dec2)
Ntarget.antenna = bf_ants
Ntarget.name = target_name + '_R'
target = Ntarget
print(target)
print(target.name)
# Get onto beamformer target
session.track(target, duration=5)
# Perform a drift scan if selected
if opts.drift_scan: