def test_intersection_nominal_reconstruction():
    """
    Test the reconstruction of the impact position in the nominal frame with a three-telescope system.
    This is done using a square configuration, in which the impact point occupies one vertex
    and the three telescopes the other three vertices.
    """
    hill_inter = HillasIntersection()

    delta = 1.0 * u.m
    horizon_frame = AltAz()
    altitude = 70 * u.deg
    azimuth = 10 * u.deg

    array_direction = SkyCoord(alt=altitude, az=azimuth, frame=horizon_frame)

    nominal_frame = NominalFrame(origin=array_direction)

    focal_length = 28 * u.m

    camera_frame = CameraFrame(
        focal_length=focal_length, telescope_pointing=array_direction
    )

    cog_coords_camera_1 = SkyCoord(x=delta, y=0 * u.m, frame=camera_frame)
    cog_coords_camera_2 = SkyCoord(x=delta / 0.7, y=delta / 0.7, frame=camera_frame)
    cog_coords_camera_3 = SkyCoord(x=0 * u.m, y=delta, frame=camera_frame)

    cog_coords_nom_1 = cog_coords_camera_1.transform_to(nominal_frame)
    cog_coords_nom_2 = cog_coords_camera_2.transform_to(nominal_frame)
    cog_coords_nom_3 = cog_coords_camera_3.transform_to(nominal_frame)

    #  x-axis is along the altitude and y-axis is along the azimuth
    hillas_1 = HillasParametersContainer(
        x=cog_coords_nom_1.fov_lat,
        y=cog_coords_nom_1.fov_lon,
        intensity=100,
        psi=0 * u.deg,
    )

    hillas_2 = HillasParametersContainer(
        x=cog_coords_nom_2.fov_lat,
        y=cog_coords_nom_2.fov_lon,
        intensity=100,
        psi=45 * u.deg,
    )

    hillas_3 = HillasParametersContainer(
        x=cog_coords_nom_3.fov_lat,
        y=cog_coords_nom_3.fov_lon,
        intensity=100,
        psi=90 * u.deg,
    )

    hillas_dict = {1: hillas_1, 2: hillas_2, 3: hillas_3}

    reco_nominal = hill_inter.reconstruct_nominal(hillas_parameters=hillas_dict)

    nominal_pos = SkyCoord(
        fov_lon=u.Quantity(reco_nominal[0], u.rad),
        fov_lat=u.Quantity(reco_nominal[1], u.rad),
        frame=nominal_frame,
    )

    np.testing.assert_allclose(
        nominal_pos.altaz.az.to_value(u.deg), azimuth.to_value(u.deg), atol=1e-8
    )
    np.testing.assert_allclose(
        nominal_pos.altaz.alt.to_value(u.deg), altitude.to_value(u.deg), atol=1e-8
    )
Example #2
def derive_lightcurve_info(lightcurve_pk):
   """
   Function to derive extra information about the observations from the 
   fits files, calculate several parameters, and derive weather information
   from external sources is possible.
   
   This information is stored in the lightcurve database entry
   """
   
   #-- get lightcurve
   lightcurve = LightCurve.objects.get(pk=lightcurve_pk)
   hjd, flux, header = lightcurve.get_lightcurve()
   
   
   #-- load info from lightcurve header
   data = instrument_headers.extract_header_info(header)
   
   # HJD
   lightcurve.hjd = data.get('hjd', 2400000)
   lightcurve.hjd_start = data.get('hjd_start', 2400000)
   lightcurve.hjd_end = data.get('hjd_end', 2400000)
   
   # pointing info
   lightcurve.objectname = data.get('objectname', '')
   lightcurve.ra = data.get('ra', -1)
   lightcurve.dec = data.get('dec', -1)
   lightcurve.alt = data.get('alt', -1)
   lightcurve.az = data.get('az', -1)
   lightcurve.airmass = data.get('airmass', -1)
   
   # telescope and instrument info
   lightcurve.instrument = data.get('instrument', 'UK')
   lightcurve.telescope = data.get('telescope', 'UK')
   lightcurve.passband = data.get('passband', 'UK')
   lightcurve.exptime = data.get('exptime', -1)
   lightcurve.cadence = data.get('cadence', -1)
   lightcurve.duration = data.get('duration', -1)
   lightcurve.observer = data.get('observer', 'UK')
   lightcurve.filetype = data.get('filetype', 'UK')
   
   # observing conditions
   if isfloat(data.get('wind_speed', -1)):
      lightcurve.wind_speed = data.get('wind_speed', -1)
   
   if isfloat(data.get('wind_direction', -1)):
      lightcurve.wind_direction = data.get('wind_direction', -1)
      
   lightcurve.seeing = data.get('seeing', -1)

   
   
   #-- observatory
   lightcurve.observatory = instrument_headers.get_observatory(header, lightcurve.project)
   
   #-- save the changes
   lightcurve.save()
   
   #-- if the observatory is in space, moon parameters etc. cannot be calculated
   if lightcurve.observatory.space_craft:
      return
   
   #-- calculate moon parameters
   time = Time(lightcurve.hjd, format='jd')
   
   # moon illumination with astroplan (astroplan returns a fraction, but we store a percentage)
   lightcurve.moon_illumination =  np.round(moon_illumination(time=time)*100, 1)
   
   # get the star and moon coordinates at time and location of observations
   star = SkyCoord(ra=lightcurve.ra*u.deg, dec=lightcurve.dec*u.deg,)
   moon = get_moon(time)
   
   observatory = lightcurve.observatory.get_EarthLocation()
   frame = AltAz(obstime=time, location=observatory)
   
   star = star.transform_to(frame)
   moon = moon.transform_to(frame)
   
   # store the separation between moon and target
   lightcurve.moon_separation = np.round(star.separation(moon).degree, 1)
   
   #-- get object alt-az and airmass if not stored in header
   #   (the header defaults above are -1, so test for negative values)
   if lightcurve.alt < 0:
      lightcurve.alt = star.alt.degree
   if lightcurve.az < 0:
      lightcurve.az = star.az.degree
   if lightcurve.airmass <= 0:
      lightcurve.airmass = np.round(star.secz.value, 2)
   
   #-- save the changes
   lightcurve.save()
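The moon-separation step above in isolation, as a minimal pure-astropy sketch (the site and coordinates below are illustrative, not from the source):

import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import AltAz, EarthLocation, SkyCoord, get_moon

time = Time(2458850.5, format='jd')
site = EarthLocation.from_geodetic(lon=-70.7 * u.deg, lat=-29.3 * u.deg, height=2400 * u.m)
frame = AltAz(obstime=time, location=site)

star = SkyCoord(ra=150.0 * u.deg, dec=-20.0 * u.deg).transform_to(frame)
moon = get_moon(time).transform_to(frame)
print(np.round(star.separation(moon).degree, 1))  # separation in degrees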
Example #3
def test_contains(region):
    geom = RegionGeom.create(region)
    position = SkyCoord([0, 0], [0, 1.1], frame="galactic", unit="deg")

    contains = geom.contains(coords={"skycoord": position})
    assert_allclose(contains, [1, 0])
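The `region` fixture is not shown; one way it might be constructed (assuming gammapy and the astropy-regions package; the 1-degree circle is hypothetical):

import astropy.units as u
from astropy.coordinates import SkyCoord
from regions import CircleSkyRegion
from gammapy.maps import RegionGeom

center = SkyCoord(0, 0, frame="galactic", unit="deg")
region = CircleSkyRegion(center=center, radius=1 * u.deg)  # hypothetical test region
geom = RegionGeom.create(region)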
Example #4
def plot(obs_parameters='', n=0, m=0, f_rest=0, slope_correction=False, dB=False, vlsr=False, meta=False, avg_ylim=[0,0], cal_ylim=[0,0], rfi=[], xlim=[0,0], ylim=[0,0], dm=0,
	 obs_file='observation.dat', cal_file='', waterfall_fits='', spectra_csv='', power_csv='', plot_file='plot.png'):
	'''
	Process, analyze and plot data.

	Args:
		obs_parameters: dict. Observation parameters (identical to parameters used to acquire data)
			dev_args: string. Device arguments (gr-osmosdr)
			rf_gain: float. RF gain
			if_gain: float. IF gain
			bb_gain: float. Baseband gain
			frequency: float. Center frequency [Hz]
			bandwidth: float. Instantaneous bandwidth [Hz]
			channels: int. Number of frequency channels (FFT size)
			t_sample: float. Integration time per FFT sample
			duration: float. Total observing duration [sec]
			loc: string. Latitude, longitude, and elevation of the observation (floats, separated by spaces)
			ra_dec: string. Right ascension and declination of the observation target (floats, separated by a space)
			az_alt: string. Azimuth and altitude of the observation target (floats, separated by a space; takes precedence over ra_dec)
		n: int. Median filter factor (spectrum)
		m: int. Median filter factor (time series)
		f_rest: float. Spectral line reference frequency used for radial velocity (Doppler shift) calculations [Hz]
		slope_correction: bool. Correct slope in poorly-calibrated spectra using linear regression
		dB: bool. Display data in decibel scaling
		vlsr: bool. Display graph in VLSR frame of reference
		meta: bool. Display header with date, time, and target
		rfi: list. Blank frequency channels contaminated with RFI ([low_frequency, high_frequency]) [Hz]
		avg_ylim: list. Averaged plot y-axis limits ([low, high])
		cal_ylim: list. Calibrated plot y-axis limits ([low, high])
		xlim: list. x-axis limits ([low_frequency, high_frequency]) [Hz]
		ylim: list. y-axis limits ([start_time, end_time]) [sec]
		dm: float. Dispersion measure for dedispersion [pc/cm^3]
		obs_file: string. Input observation filename (generated with virgo.observe)
		cal_file: string. Input calibration filename (generated with virgo.observe)
		waterfall_fits: string. Output FITS filename
		spectra_csv: string. Output CSV filename (spectra)
		power_csv: string. Output CSV filename (time series)
		plot_file: string. Output plot filename
	'''
	import matplotlib
	matplotlib.use('Agg') # Try commenting this line if you run into display/rendering errors
	import matplotlib.pyplot as plt
	from matplotlib.gridspec import GridSpec

	plt.rcParams['legend.fontsize'] = 14
	plt.rcParams['axes.labelsize'] = 14
	plt.rcParams['axes.titlesize'] = 18
	plt.rcParams['xtick.labelsize'] = 12
	plt.rcParams['ytick.labelsize'] = 12

	def decibel(x):
		if dB: return 10.0*np.log10(x)
		return x

	def shift(phase_num, n_rows):
		waterfall[:, phase_num] = np.roll(waterfall[:, phase_num], -n_rows)

	def SNR(spectrum, mask=np.array([])):
		'''Signal-to-Noise Ratio estimator, with optional masking.
		If mask not given, then all channels will be used to estimate noise
		(will drastically underestimate S:N - not robust to outliers!)'''

		if mask.size == 0:
			mask = np.zeros_like(spectrum)

		noise = np.nanstd((spectrum[2:]-spectrum[:-2])[mask[1:-1] == 0])/np.sqrt(2)
		background = np.nanmean(spectrum[mask == 0])

		return (spectrum-background)/noise

	def best_fit(power):
		'''Compute best Gaussian fit'''
		avg = np.nanmean(power)
		var = np.var(power)

		gaussian_fit_x = np.linspace(np.min(power),np.max(power),100)
		gaussian_fit_y = 1.0/np.sqrt(2*np.pi*var)*np.exp(-0.5*(gaussian_fit_x-avg)**2/var)

		return [gaussian_fit_x, gaussian_fit_y]

	# Load observation parameters from dictionary argument/header file
	if obs_parameters != '':
		frequency = obs_parameters['frequency']
		bandwidth = obs_parameters['bandwidth']
		channels = obs_parameters['channels']
		t_sample = obs_parameters['t_sample']
		loc = obs_parameters['loc']
		ra_dec = obs_parameters['ra_dec']
		az_alt = obs_parameters['az_alt']
	else:
		header_file = '.'.join(obs_file.split('.')[:-1])+'.header'

		warnings.warn('No observation parameters passed. Attempting to load from header file ('+header_file+')...')

		with open(header_file, 'r') as f:
			headers = [parameter.rstrip('\n') for parameter in f.readlines()]

		for i in range(len(headers)):
			if 'mjd' in headers[i]:
				mjd = float(headers[i].strip().split('=')[1])
			elif 'frequency' in headers[i]:
				frequency = float(headers[i].strip().split('=')[1])
			elif 'bandwidth' in headers[i]:
				bandwidth = float(headers[i].strip().split('=')[1])
			elif 'channels' in headers[i]:
				channels = int(headers[i].strip().split('=')[1])
			elif 't_sample' in headers[i]:
				t_sample = float(headers[i].strip().split('=')[1])
			elif 'loc' in headers[i]:
				loc = tuple(map(float, headers[i].strip().split('=')[1].split(' ')))
			elif 'ra_dec' in headers[i]:
				ra_dec = tuple(map(str, headers[i].split('=')[1].split(' ')))
			elif 'az_alt' in headers[i]:
				az_alt = tuple(map(float, headers[i].split('=')[1].split(' ')))



	# Transform frequency axis limits to MHz
	xlim = [x / 1e6 for x in xlim]

	# Transform to VLSR
	if vlsr:

		from astropy import units as u
		from astropy.coordinates import SpectralCoord, EarthLocation, SkyCoord
		from astropy.time import Time

		obs_location = EarthLocation.from_geodetic(loc[0], loc[1], loc[2])
		obs_time = obs_location.get_itrs(obstime=Time(str(mjd), format='mjd', scale='utc'))

		if az_alt!='':
				obs_coord = SkyCoord(az=az_alt[0]*u.degree, alt=az_alt[1]*u.degree, frame='altaz', location=obs_location, obstime=Time(str(mjd), format='mjd', scale='utc'))
				obs_coord = obs_coord.icrs
				print (obs_coord)
		else:
				obs_coord = SkyCoord(ra=ra_dec[0]*u.degree, dec=ra_dec[1]*u.degree, frame='icrs')


		#Transform center frequency
		frequency = SpectralCoord(frequency * u.MHz, observer=obs_time, target=obs_coord)
		frequency = frequency.with_observer_stationary_relative_to('lsrk')
		frequency = frequency.quantity.value

	# Define Radial Velocity axis limits
	left_velocity_edge = -299792.458*(bandwidth-2*frequency+2*f_rest)/(bandwidth-2*frequency)
	right_velocity_edge = 299792.458*(-bandwidth-2*frequency+2*f_rest)/(bandwidth+2*frequency)

	# Transform sampling time to number of bins
	bins = int(t_sample*bandwidth/channels)

	# Load observation & calibration data
	offset = 1
	waterfall = offset*np.fromfile(obs_file, dtype='float32').reshape(-1, channels)/bins

	# Delete first 3 rows (potentially containing outlier samples)
	waterfall = waterfall[3:, :]

	# Mask RFI-contaminated channels
	if rfi != []:

		for j in range(len(rfi)):

			# Frequency to channel transformation
			current_rfi = rfi[j]
			rfi_lo = channels*(current_rfi[0] - (frequency - bandwidth/2))/bandwidth
			rfi_hi = channels*(current_rfi[1] - (frequency - bandwidth/2))/bandwidth

			# Blank channels
			for i in range(int(rfi_lo), int(rfi_hi)):
				waterfall[:, i] = np.nan

	if cal_file != '':
		waterfall_cal = offset*np.fromfile(cal_file, dtype='float32').reshape(-1, channels)/bins

		# Delete first 3 rows (potentially containing outlier samples)
		waterfall_cal = waterfall_cal[3:, :]

		# Mask RFI-contaminated channels
		if rfi != []:

			for j in range(len(rfi)):

				# Frequency to channel transformation
				current_rfi = rfi[j]
				rfi_lo = channels*(current_rfi[0] - (frequency - bandwidth/2))/bandwidth
				rfi_hi = channels*(current_rfi[1] - (frequency - bandwidth/2))/bandwidth

				# Blank channels
				for i in range(int(rfi_lo), int(rfi_hi)):
					waterfall_cal[:, i] = np.nan

	# Compute average spectra
	with warnings.catch_warnings():
		warnings.filterwarnings(action='ignore', message='Mean of empty slice')
		avg_spectrum = decibel(np.nanmean(waterfall, axis=0))
		if cal_file != '':
			avg_spectrum_cal = decibel(np.nanmean(waterfall_cal, axis=0))

	# Number of sub-integrations
	subs = waterfall.shape[0]

	# Compute Time axis
	t = t_sample*np.arange(subs)

	# Compute Frequency axis; convert Hz to MHz
	frequency = np.linspace(frequency-0.5*bandwidth, frequency+0.5*bandwidth,
	                        channels, endpoint=False)*1e-6

	# Perform de-dispersion
	if dm != 0:
		deltaF = float(np.max(frequency)-np.min(frequency))/subs
		f_start = np.min(frequency)
		for t_bin in range(subs):
			f_chan = f_start+t_bin*deltaF
			deltaT = 4149*dm*((1/(f_chan**2))-(1/(np.max(frequency)**2)))
			n = int((float(deltaT)/(float(1)/channels)))
			shift(t_bin, n)

	# Define array for Time Series plot
	power = decibel(np.nanmean(waterfall, axis=1))

	# Apply Mask
	mask = np.zeros_like(avg_spectrum)
	mask[np.logical_and(frequency > f_rest*1e-6-0.2, frequency < f_rest*1e-6+0.8)] = 1 # Margins OK for galactic HI

	# Define text offset for axvline text label
	text_offset = 0

	# Calibrate Spectrum
	if cal_file != '':
		if dB:
			spectrum = 10**((avg_spectrum-avg_spectrum_cal)/10)
		else:
			spectrum = avg_spectrum/avg_spectrum_cal

		spectrum = SNR(spectrum, mask)
		if slope_correction:
			idx = np.isfinite(frequency) & np.isfinite(spectrum)
			fit = np.polyfit(frequency[idx], spectrum[idx], 1)
			ang_coeff = fit[0]
			intercept = fit[1]
			fit_eq = ang_coeff*frequency + intercept
			spectrum = SNR(spectrum-fit_eq, mask)

		# Mitigate RFI (Frequency Domain)
		if n != 0:
			spectrum_clean = SNR(spectrum.copy(), mask)
			for i in range(0, int(channels)):
				spectrum_clean[i] = np.nanmedian(spectrum_clean[i:i+n])

		# Apply position offset for Spectral Line label
		text_offset = 60

	# Mitigate RFI (Time Domain)
	if m != 0:
		power_clean = power.copy()
		for i in range(0, int(subs)):
			power_clean[i] = np.nanmedian(power_clean[i:i+m])


	# Write Waterfall to file (FITS)
	if waterfall_fits != '':
		from astropy.io import fits

		# Load data
		hdu = fits.PrimaryHDU(waterfall)

		# Prepare FITS headers
		hdu.header['NAXIS'] = 2
		hdu.header['NAXIS1'] = channels
		hdu.header['NAXIS2'] = subs
		hdu.header['CRPIX1'] = channels/2
		hdu.header['CRPIX2'] = subs/2
		hdu.header['CRVAL1'] = frequency[int(channels/2)]
		hdu.header['CRVAL2'] = t[int(subs/2)]
		hdu.header['CDELT1'] = bandwidth*1e-6/channels
		hdu.header['CDELT2'] = t_sample
		hdu.header['CTYPE1'] = 'Frequency (MHz)'
		hdu.header['CTYPE2'] = 'Relative Time (s)'
		try:
			hdu.header['MJD-OBS'] = mjd
		except NameError:
			warnings.warn('Observation MJD could not be found and will not be part of the FITS header.')
			pass

		# Delete pre-existing FITS file
		try:
			os.remove(waterfall_fits)
		except OSError:
			pass

		# Write to file
		hdu.writeto(waterfall_fits)

	# Write Spectra to file (csv)
	if spectra_csv != '':
		if cal_file != '':
			np.savetxt(spectra_csv, np.concatenate((frequency.reshape(channels, 1),
                       avg_spectrum.reshape(channels, 1), avg_spectrum_cal.reshape(channels, 1),
                       spectrum.reshape(channels, 1)), axis=1), delimiter=',', fmt='%1.6f')
		else:
			np.savetxt(spectra_csv, np.concatenate((frequency.reshape(channels, 1),
                       avg_spectrum.reshape(channels, 1)), axis=1), delimiter=',', fmt='%1.6f')

	# Write Time Series to file (csv)
	if power_csv != '':
		np.savetxt(power_csv, np.concatenate((t.reshape(subs, 1), power.reshape(subs, 1)),
                   axis=1), delimiter=',', fmt='%1.6f')

	# Initialize plot
	if cal_file != '':
		fig = plt.figure(figsize=(27, 15))
		gs = GridSpec(2, 3)
	else:
		fig = plt.figure(figsize=(21, 15))
		gs = GridSpec(2, 2)

	if meta:
		from astropy.coordinates import get_constellation

		epoch = (mjd - 40587) * 86400.0
		meta_title = 'Date and Time: ' + time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(epoch)) + '       '
		meta_title += 'Target: ' + obs_coord.to_string('hmsdms', precision=0) + ' in ' + get_constellation(obs_coord) + '\n'
		plt.suptitle(meta_title, fontsize=18)

	# Plot Average Spectrum
	ax1 = fig.add_subplot(gs[0, 0])
	ax1.plot(frequency, avg_spectrum)
	if xlim == [0,0]:
		ax1.set_xlim(np.min(frequency), np.max(frequency))
	else:
		ax1.set_xlim(xlim[0], xlim[1])
	ax1.ticklabel_format(useOffset=False)
	ax1.set_xlabel('Frequency (MHz)')
	if avg_ylim != [0,0]:
		ax1.set_ylim(avg_ylim[0], avg_ylim[1])
	if dB:
		ax1.set_ylabel('Relative Power (dB)')
	else:
		ax1.set_ylabel('Relative Power')

	if vlsr:
		cal_title = r'$Average\ Spectrum\ (V_{LSR})$'
	else:
		cal_title = 'Average Spectrum'

	if f_rest != 0:
		cal_title += '\n'

	ax1.set_title(cal_title)
	ax1.grid()

	if xlim == [0,0] and f_rest != 0:
		# Add secondary axis for Radial Velocity
		ax1_secondary = ax1.twiny()
		ax1_secondary.set_xlabel('Radial Velocity (km/s)', labelpad=5)
		ax1_secondary.axvline(x=0, color='brown', linestyle='--', linewidth=2, zorder=0)
		ax1_secondary.annotate('Spectral Line\nRest Frequency', xy=(460-text_offset, 5),
                               xycoords='axes points', size=14, ha='left', va='bottom', color='brown')
		ax1_secondary.set_xlim(left_velocity_edge, right_velocity_edge)
		ax1_secondary.tick_params(axis='x', direction='in', pad=-22)

	#Plot Calibrated Spectrum
	if cal_file != '':
		ax2 = fig.add_subplot(gs[0, 1])
		ax2.plot(frequency, spectrum, label='Raw Spectrum')
		if n != 0:
			ax2.plot(frequency, spectrum_clean, color='orangered', label='Median (n = '+str(n)+')')

		if cal_ylim !=[0,0]:
			ax2.set_ylim(cal_ylim[0],cal_ylim[1])
		else:
			ax2.set_ylim()

		if xlim == [0,0]:
			ax2.set_xlim(np.min(frequency), np.max(frequency))
		else:
			ax2.set_xlim(xlim[0], xlim[1])
		ax2.ticklabel_format(useOffset=False)
		ax2.set_xlabel('Frequency (MHz)')
		ax2.set_ylabel('Signal-to-Noise Ratio (S/N)')

		if vlsr:
			cal_title = r'$Calibrated\ Spectrum\ (V_{LSR})$' + '\n'
		else:
			cal_title = 'Calibrated Spectrum\n'

		if f_rest != 0:
			ax2.set_title(cal_title)
		else:
			ax2.set_title('Calibrated Spectrum')
		if n != 0:
			if f_rest != 0:
				ax2.legend(bbox_to_anchor=(0.002, 0.96), loc='upper left')
			else:
				ax2.legend(loc='upper left')

		if xlim == [0,0] and f_rest != 0:
			# Add secondary axis for Radial Velocity
			ax2_secondary = ax2.twiny()
			ax2_secondary.set_xlabel('Radial Velocity (km/s)', labelpad=5)
			ax2_secondary.axvline(x=0, color='brown', linestyle='--', linewidth=2, zorder=0)
			ax2_secondary.annotate('Spectral Line\nRest Frequency', xy=(400, 5),
                                   xycoords='axes points', size=14, ha='left', va='bottom', color='brown')
			ax2_secondary.set_xlim(left_velocity_edge, right_velocity_edge)
			ax2_secondary.tick_params(axis='x', direction='in', pad=-22)
		ax2.grid()

	# Plot Dynamic Spectrum
	if cal_file != '':
		ax3 = fig.add_subplot(gs[0, 2])
	else:
		ax3 = fig.add_subplot(gs[0, 1])

	ax3.imshow(decibel(waterfall), origin='lower', interpolation='None', aspect='auto',
		   extent=[np.min(frequency), np.max(frequency), np.min(t), np.max(t)])
	if xlim == [0,0] and ylim != [0,0]:
		ax3.set_ylim(ylim[0], ylim[1])
	elif xlim != [0,0] and ylim == [0,0]:
		ax3.set_xlim(xlim[0], xlim[1])
	elif xlim != [0,0] and ylim != [0,0]:
		ax3.set_xlim(xlim[0], xlim[1])
		ax3.set_ylim(ylim[0], ylim[1])

	ax3.ticklabel_format(useOffset=False)
	ax3.set_xlabel('Frequency (MHz)')
	ax3.set_ylabel('Relative Time (s)')
	ax3.set_title('Dynamic Spectrum (Waterfall)')

	# Adjust Subplot Width Ratio
	if cal_file != '':
		gs = GridSpec(2, 3, width_ratios=[16.5, 1, 1])
	else:
		gs = GridSpec(2, 2, width_ratios=[7.6, 1])

	# Plot Time Series (Power vs Time)
	ax4 = fig.add_subplot(gs[1, 0])
	ax4.plot(t, power, label='Raw Time Series')
	if m != 0:
		ax4.plot(t, power_clean, color='orangered', label='Median (n = '+str(m)+')')
		ax4.set_ylim()
	if ylim == [0,0]:
		ax4.set_xlim(0, np.max(t))
	else:
		ax4.set_xlim(ylim[0], ylim[1])
	ax4.set_xlabel('Relative Time (s)')
	if dB:
		ax4.set_ylabel('Relative Power (dB)')
	else:
		ax4.set_ylabel('Relative Power')
	ax4.set_title('Average Power vs Time')
	if m != 0:
		ax4.legend(bbox_to_anchor=(1, 1), loc='upper right')
	ax4.grid()

	# Plot Total Power Distribution
	if cal_file != '':
		gs = GridSpec(2, 3, width_ratios=[7.83, 1.5, -0.325])
	else:
		gs = GridSpec(2, 2, width_ratios=[8.8, 1.5])

	ax5 = fig.add_subplot(gs[1, 1])

	ax5.hist(power, np.max([int(np.size(power)/50),10]), density=1, alpha=0.5, color='royalblue', orientation='horizontal', zorder=10)
	ax5.plot(best_fit(power)[1], best_fit(power)[0], '--', color='blue', label='Best fit (Raw)', zorder=20)
	if m != 0:
		ax5.hist(power_clean, np.max([int(np.size(power_clean)/50),10]), density=1, alpha=0.5, color='orangered', orientation='horizontal', zorder=10)
		ax5.plot(best_fit(power_clean)[1], best_fit(power_clean)[0], '--', color='red', label='Best fit (Median)', zorder=20)
	ax5.set_xlim()
	ax5.set_ylim()
	ax5.get_shared_x_axes().join(ax5, ax4)
	ax5.set_yticklabels([])
	ax5.set_xlabel('Probability Density')
	ax5.set_title('Total Power Distribution')
	ax5.legend(bbox_to_anchor=(1, 1), loc='upper right')
	ax5.grid()

	# Save plots to file
	plt.tight_layout()
	plt.savefig(plot_file)
	plt.clf()
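A hypothetical call illustrating how plot() above is typically driven; the parameter values and file names are placeholders, not from the source:

obs = {
    'dev_args': '', 'rf_gain': 30, 'if_gain': 25, 'bb_gain': 18,
    'frequency': 1420e6, 'bandwidth': 2.4e6, 'channels': 2048,
    't_sample': 1, 'duration': 60, 'loc': '', 'ra_dec': '', 'az_alt': ''
}
plot(obs_parameters=obs, n=20, m=35, f_rest=1420.4057517667e6, dB=True,
     obs_file='observation.dat', plot_file='plot.png')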
Example #5
def aia_test_arc(aia171_test_map):
    start = SkyCoord(735 * u.arcsec, -471 * u.arcsec, frame=aia171_test_map.coordinate_frame)
    end = SkyCoord(-100 * u.arcsec, 800 * u.arcsec, frame=aia171_test_map.coordinate_frame)
    return GreatArc(start, end)
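A sketch of how the returned GreatArc might be drawn; a sunpy sample map stands in for the aia171_test_map fixture (plotting calls assume sunpy's Map API):

import matplotlib.pyplot as plt
import sunpy.data.sample
import sunpy.map

aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
arc = aia_test_arc(aia_map)

ax = plt.subplot(projection=aia_map)
aia_map.plot(axes=ax)
ax.plot_coord(arc.coordinates(), color='c')  # sample points along the great arc
plt.show()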
Example #6
imageDir = Dir + 'casa5.4/'
picDir = Dir + 'picture/'
regionDir = Dir + 'region/'

mom0file = imageDir + 'NGC5257_12CO21_combine_pbcor_mom0.fits'
mom1file = imageDir + 'NGC5257_12CO21_pbcor_cube_mom1.fits'
mom2file = imageDir + 'NGC5257_12CO21_pbcor_cube_mom2.fits'

############################################################
# basic information

galaxy = 'NGC5257'
line = '12CO21'

position = SkyCoord(dec=50.4167 * u.arcmin,
                    ra=204.9706 * u.degree,
                    frame='icrs')
beamra = 204 * u.degree + 58 * u.arcmin + 30 * u.arcsec
beamdec = 50 * u.arcmin + 3 * u.arcsec
beamposition = SkyCoord(dec=beamdec, ra=beamra, frame='icrs')

beammajor = 1.008 * u.arcsec / 2.0
beamminor = 0.513 * u.arcsec / 2.0
pa = -64.574 * u.degree

############################################################
# basic settings

testra = 204.97609228
testdec = 0.84611111
Example #7
def gcrs_to_altaz(ra, dec):
    altaz_frame = update_altaz()
    gcrs_frame = update_GCRS()
    pos_gcrs = SkyCoord(ra=ra * u.degree, dec=dec * u.degree, frame=gcrs_frame)
    pos_altaz = pos_gcrs.transform_to(altaz_frame)
    return (pos_altaz.az.degree, pos_altaz.alt.degree)
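update_altaz() and update_GCRS() are not shown; these are plausible stand-ins, with the helper defaults (current time, a named observatory site) entirely hypothetical:

from astropy.time import Time
from astropy.coordinates import GCRS, AltAz, EarthLocation

def update_altaz(when=None, site=None):
    # Horizontal frame for a given time and site (defaults are placeholders)
    when = when or Time.now()
    site = site or EarthLocation.of_site('greenwich')
    return AltAz(obstime=when, location=site)

def update_GCRS(when=None):
    # Geocentric frame at the same epoch
    return GCRS(obstime=when or Time.now())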
Example #8
def get_gaia_data_for_toi(toi_num, use_cache=True, **kwargs):
    info = get_info_for_toi(toi_num, use_cache=use_cache).iloc[0]
    coord = SkyCoord(ra=info["RA"], dec=info["Dec"], unit=(u.hourangle, u.deg))
    return get_gaia_data(coord, approx_mag=float(info["TESS Mag"]), **kwargs)
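Note the mixed units above: SkyCoord parses the sexagesimal RA string as hour angle and Dec as degrees. For example (illustrative values):

import astropy.units as u
from astropy.coordinates import SkyCoord

# RA string is parsed as hours, Dec as degrees
coord = SkyCoord(ra="04:38:12.9", dec="-12:17:43", unit=(u.hourangle, u.deg))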
Example #9
def get_gaia_data_for_tic(tic, **kwargs):
    info = get_info_for_tic(tic)
    coord = SkyCoord(
        ra=float(info["ra"]) * u.deg, dec=float(info["dec"]) * u.deg
    )
    return get_gaia_data(coord, approx_mag=float(info["GAIAmag"]), **kwargs)
Example #10


def test_mast_service_request(patch_post):
    service = 'Mast.Caom.Cone'
    params = {'ra': 23.34086, 'dec': 60.658, 'radius': 0.2}
    result = mast.Mast.service_request(service, params)

    assert isinstance(result, Table)


###########################
# ObservationsClass tests #
###########################

regionCoords = SkyCoord(23.34086, 60.658, unit=('deg', 'deg'))

# query functions


def test_observations_query_region_async(patch_post):
    responses = mast.Observations.query_region_async(regionCoords, radius=0.2)
    assert isinstance(responses, list)


def test_observations_query_region(patch_post):
    result = mast.Observations.query_region(regionCoords, radius=0.2 * u.deg)
    assert isinstance(result, Table)


def test_observations_query_object_async(patch_post):
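The tests above run against a patched transport; the equivalent live query (network access required) would look like this sketch:

import astropy.units as u
from astropy.coordinates import SkyCoord
from astroquery.mast import Observations

region_coords = SkyCoord(23.34086, 60.658, unit=('deg', 'deg'))
result = Observations.query_region(region_coords, radius=0.2 * u.deg)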
Example #11
def skycoord(self):
    return SkyCoord(ra=self.ra, dec=self.dec, unit='deg')
def s1c_sp_calibration(fit_path,
                       masterdark_fname,
                       masterflat_fname,
                       solve_pars_fname,
                       save_fname=None,
                       area_rad=4,
                       med_size=31,
                       lat_deg=56.1501667,
                       lon_deg=46.1050833,
                       hei_m=183.):
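    # NOTE: this function relies on module-level globals defined elsewhere in the
    # original script (the star-catalog arrays NUM, BS_ID, RA, DEC, MAG, FLUX,
    # SP_type and the plotting constants dx, mew, ms); they are not shown here.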
    if save_fname is None:
        save_fname = solve_pars_fname.split('/')[-1][0:-11] + '.spcal'

    hdulist = fits.open(masterdark_fname, ignore_missing_end=True)
    masterdark = hdulist[0].data
    hdulist.close()

    hdulist = fits.open(masterflat_fname, ignore_missing_end=True)
    masterflat = hdulist[0].data
    hdulist.close()

    err, az0, alt0, a, b, c, d = get_solve_pars(solve_pars_fname)
    M_s1c = get_scale_and_orientation_info(c, d)

    spath = "./.temp/"
    if not os.path.exists(spath):
        os.makedirs(spath)
    fit_filenames = [fit_path + '/' + fn for fn in next(os.walk(fit_path))[2]]

    R_median = np.zeros(len(fit_filenames))
    R_std = np.zeros(len(fit_filenames))

    fig = plt.figure(figsize=(12.8, 7.2))
    fig.set_size_inches(12.8, 7.2)

    #     ax=plt.axes(position=[0.035000000000000003+dx/2, 0.061764705882352888, 0.50624999999999998, 0.89999999999999991])
    ax = plt.axes(position=[
        0.035000000000000003 + dx / 2, 0.26, 0.50624999999999998, 0.7
    ])
    plt.axis('off')
    ax1 = plt.axes(position=[
        0.58205882352941185 + dx, 0.71052941176470596 - 0.15, 0.35, 0.4
    ])
    plt.grid()
    ax2 = plt.axes(
        position=[0.58205882352941185 + dx, 0.061764705882352888, 0.35, 0.4])
    plt.grid()

    ax3 = plt.axes(
        position=[0.035000000000000003 + dx / 2, 0.04, 0.5 / 4, 0.7 / 4])
    plt.grid(b=True)
    ax4 = plt.axes(position=[
        0.035000000000000003 + dx / 2 + 0.19, 0.04, 0.5 / 4, 0.7 / 4
    ])
    plt.grid(b=True)
    ax5 = plt.axes(position=[
        0.035000000000000003 + dx / 2 + 0.38, 0.04, 0.5 / 4, 0.7 / 4
    ])
    plt.grid(b=True)

    R_day = 0

    vrange = 1000
    vshift = 2000

    alt_min = 83 * np.pi / 180
    #     area_rad=3
    #     med_size=31

    XX, YY = np.meshgrid(range(2 * area_rad + 1), range(2 * area_rad + 1))

    fid = open(save_fname, 'w')
    fid.write(
        "# Camera calibration coefficients [Rayleighs per ADC unit] for each fit file:\n"
    )

    for i in range(len(fit_filenames)):
        #     for i in range(5):
        #     for i in range(58,59):
        sys.stdout.write('\r')
        sys.stdout.write("Processing frame " + str(i + 1) + "/" +
                         str(len(fit_filenames)))
        sys.stdout.flush()
        fit_fname = fit_filenames[i]
        hdulist = fits.open(fit_fname, ignore_missing_end=True)
        img = hdulist[0].data.astype('float')
        img0 = np.copy(img)
        img = (img - masterdark.astype('float')) / masterflat.astype('float')
        img_medfilt = img - ss.medfilt(img, kernel_size=med_size)
        #         np.save('img.npy',img)
        hdulist.close()

        med_img = np.median(img)
        max_img0 = np.max(img0)
        #         print(max_img0)

        s1c_site = EarthLocation(lat=lat_deg * u.deg,
                                 lon=lon_deg * u.deg,
                                 height=hei_m * u.m)
        date_obs = s1c_get_date_obs(fit_fname, ut_shift=-4)

        png_prefix = spath + "frame_s1c_" + str(
            date_obs.year)[2::] + "{0:0>2}".format(
                date_obs.month) + "{0:0>2}".format(date_obs.day) + "_"

        BS_coord = SkyCoord(RA, DEC, frame='icrs', unit='rad')
        altaz = BS_coord.transform_to(
            AltAz(obstime=date_obs,
                  location=s1c_site,
                  temperature=20 * u.deg_C,
                  pressure=1013 * u.hPa,
                  relative_humidity=0.5,
                  obswl=630.0 * u.nm))
        AZ = altaz.az.rad
        ALT = altaz.alt.rad

        X, Y = tan_hor2pix(AZ, ALT, az0, alt0, c, d)

        # Catalog filtration
        filt_mask = np.zeros(len(NUM), dtype=bool)
        for j in range(len(NUM)):
            if ALT[j] >= alt_min and X[j] >= 10 and Y[j] >= 10 and X[
                    j] <= img.shape[1] - 10 and Y[j] <= img.shape[0] - 10:
                filt_mask[j] = True
        NUM_filt = NUM[filt_mask]
        BS_ID_filt = BS_ID[filt_mask]
        RA_filt = RA[filt_mask]
        DEC_filt = DEC[filt_mask]
        MAG_filt = MAG[filt_mask]
        FLUX_filt = FLUX[filt_mask]
        SP_type_filt = [
            SP_type[j] for j in range(len(SP_type)) if filt_mask[j] == True
        ]
        ALT_filt = ALT[filt_mask]
        AZ_filt = AZ[filt_mask]
        X_filt = X[filt_mask]
        Y_filt = Y[filt_mask]

        star_pixels = np.zeros(len(NUM_filt), dtype=int)
        star_adc = np.zeros(len(NUM_filt))

        AREA = np.zeros((2 * area_rad + 1, 2 * area_rad + 1, len(NUM_filt)))
        AREA0 = np.zeros((2 * area_rad + 1, 2 * area_rad + 1, len(NUM_filt)))

        for j in range(len(NUM_filt)):
            sum_temp = 0
            num = 0
            AREA[:, :, j] = np.copy(
                img_medfilt[int(Y_filt[j]) - area_rad:int(Y_filt[j]) +
                            area_rad + 1,
                            int(X_filt[j]) - area_rad:int(X_filt[j]) +
                            area_rad + 1])
            AREA0[:, :, j] = np.copy(
                img0[int(Y_filt[j]) - area_rad:int(Y_filt[j]) + area_rad + 1,
                     int(X_filt[j]) - area_rad:int(X_filt[j]) + area_rad + 1])

            arg_max = np.argmax(AREA[:, :, j])
            #             print("max=", np.max(AREA[:,:,j]))
            x0 = XX.flat[arg_max]
            y0 = YY.flat[arg_max]

            AREA[:, :,
                 j] = np.copy(img_medfilt[int(Y_filt[j]) - area_rad + y0 -
                                          area_rad:int(Y_filt[j]) + y0 + 1,
                                          int(X_filt[j]) - area_rad + x0 -
                                          area_rad:int(X_filt[j]) + x0 + 1])
            AREA0[:, :, j] = np.copy(img0[int(Y_filt[j]) - area_rad + y0 -
                                          area_rad:int(Y_filt[j]) + y0 + 1,
                                          int(X_filt[j]) - area_rad + x0 -
                                          area_rad:int(X_filt[j]) + x0 + 1])

            Rast = np.sqrt((XX - area_rad)**2 + (YY - area_rad)**2)

            #             print(Rast)

            area = np.copy(AREA[:, :, j])

            for k in range((2 * area_rad + 1)**2):
                if Rast.flat[k] <= area_rad:
                    num += 1
                    sum_temp += AREA[:, :, j].flat[k]
            star_pixels[j] = num
            star_adc[j] = sum_temp
            # print(star_pixels[j], star_adc[j])
            # print(AREA[:,:,j])

        # Catalog filtration 2
        filt_mask = np.zeros(len(NUM_filt), dtype=bool)
        for j in range(len(NUM_filt)):
            if np.max(AREA0[:, :, j]) < 0.9 * max_img0:
                filt_mask[j] = True

        AREA_filt2 = np.copy(AREA[:, :, filt_mask])
        AREA0_filt2 = np.copy(AREA0[:, :, filt_mask])

        NUM_filt2 = NUM_filt[filt_mask]
        BS_ID_filt2 = BS_ID_filt[filt_mask]
        RA_filt2 = RA_filt[filt_mask]
        DEC_filt2 = DEC_filt[filt_mask]
        MAG_filt2 = MAG_filt[filt_mask]
        FLUX_filt2 = FLUX_filt[filt_mask]
        SP_type_filt2 = [
            SP_type_filt[j] for j in range(len(SP_type_filt))
            if filt_mask[j] == True
        ]
        X_filt2 = X_filt[filt_mask]
        Y_filt2 = Y_filt[filt_mask]
        ALT_filt2 = ALT_filt[filt_mask]
        AZ_filt2 = AZ_filt[filt_mask]
        star_pixels_filt2 = star_pixels[filt_mask]
        star_adc_filt2 = star_adc[filt_mask]

        R = np.zeros(len(NUM_filt2))
        for j in range(len(NUM_filt2)):
            sol_angle = tan_get_pixel_solid_angle(M_s1c,
                                                  np.pi / 2 - ALT_filt2[j])
            br = get_brightness_in_Rayleighs(100, sol_angle, 1, FLUX_filt2[j])
            sa = star_adc_filt2[j]
            R[j] = br / sa
#             print('br[R]=', br, "; sa[ADC.u]=",sa,"; R=",R[j])

        x_bord = np.arange(2 * area_rad + 1, dtype=float)
        y1_bord = np.sqrt(area_rad**2 -
                          (x_bord - area_rad)**2) + area_rad + 0.5
        y2_bord = -np.sqrt(area_rad**2 -
                           (x_bord - area_rad)**2) + area_rad + 0.5

        if len(NUM_filt2) > 0:

            sort_ord = np.argsort(star_pixels_filt2)

            plt.sca(ax3)
            idd = 0
            area1 = AREA_filt2[:, :, sort_ord[idd]]
            area1_max = np.max(AREA0_filt2[:, :, sort_ord[idd]])
            area1_id = BS_ID_filt2[sort_ord[idd]]
            plt.pcolormesh(area1, cmap="seismic", vmin=-1000, vmax=1000)

            plt.plot(x_bord + 0.5, y1_bord, 'k-', lw=2)
            plt.plot(x_bord + 0.5, y2_bord, 'k-', lw=2)

            plt.ylim(area_rad * 2 + 1, 0)
            plt.xlim(0, area_rad * 2 + 1)
            plt.title(str(area1_id), loc='left', fontsize='smaller')
            plt.title(str(int(star_adc_filt2[sort_ord[idd]])),
                      loc='right',
                      fontsize='smaller')
            plt.title(str(int(R[sort_ord[idd]] * 1000) / 1000),
                      fontsize='smaller')
            plt.ylabel(
                str(int(R[sort_ord[idd]] * star_adc_filt2[sort_ord[idd]])))

            if len(NUM_filt2) > 1:
                plt.sca(ax4)
                idd = 1
                area2 = AREA_filt2[:, :, sort_ord[idd]]
                area2_max = np.max(AREA0_filt2[:, :, sort_ord[idd]])
                area2_id = BS_ID_filt2[sort_ord[idd]]
                plt.pcolormesh(area2, cmap="seismic", vmin=-1000, vmax=1000)

                plt.plot(x_bord + 0.5, y1_bord, 'k-', lw=2)
                plt.plot(x_bord + 0.5, y2_bord, 'k-', lw=2)

                plt.ylim(area_rad * 2 + 1, 0)
                plt.xlim(0, area_rad * 2 + 1)
                plt.title(str(area2_id), loc='left', fontsize='smaller')
                plt.title(str(int(star_adc_filt2[sort_ord[idd]])),
                          loc='right',
                          fontsize='smaller')
                plt.title(str(int(R[sort_ord[idd]] * 1000) / 1000),
                          fontsize='smaller')
                plt.ylabel(
                    str(int(R[sort_ord[idd]] * star_adc_filt2[sort_ord[idd]])))
            if len(NUM_filt2) > 2:
                plt.sca(ax5)
                idd = 2
                area3 = AREA_filt2[:, :, sort_ord[idd]]
                area3_max = np.max(AREA0_filt2[:, :, sort_ord[idd]])
                area3_id = BS_ID_filt2[sort_ord[idd]]
                plt.pcolormesh(area3, cmap="seismic", vmin=-1000, vmax=1000)

                plt.plot(x_bord + 0.5, y1_bord, 'k-', lw=2)
                plt.plot(x_bord + 0.5, y2_bord, 'k-', lw=2)

                plt.ylim(area_rad * 2 + 1, 0)
                plt.xlim(0, area_rad * 2 + 1)
                plt.title(str(area3_id), loc='left', fontsize='smaller')
                plt.title(str(int(star_adc_filt2[sort_ord[idd]])),
                          loc='right',
                          fontsize='smaller')
                plt.title(str(int(R[sort_ord[idd]] * 1000) / 1000),
                          fontsize='smaller')
                plt.ylabel(
                    str(int(R[sort_ord[idd]] * star_adc_filt2[sort_ord[idd]])))

        # print(len(R),R)
        if len(R) > 0:
            R_median[i] = np.median(R)
            temp = (R - R_median[i])**2
            R_std[i] = np.sqrt(np.median(temp))
        #     print(R_median[i])

        plt.sca(ax1)
        plt.plot([0, 50000], [R_median[i], R_median[i]], c='r', lw=2)
        plt.scatter(R * star_adc_filt2,
                    R,
                    s=np.pi * (star_adc_filt2 / 12000 * 7)**2)
        #         print(R[0]*star_adc_filt2[0])
        #         print(R[0]*star_adc_filt2[0]*star_pixels_filt2[0])

        for j in range(len(X_filt2)):
            plt.text(R[j] * star_adc_filt2[j], R[j],
                     str(BS_ID_filt2[j]) + "_" + str(star_pixels_filt2[j]))
        plt.ylabel('Calibration coef. [R/ADCu]')
        plt.xlabel('Star summ brightness [R]')
        plt.title(date_obs, loc='left')
        plt.title(R_median[i], loc='right')
        ax1.set_xlim((0, 30000))
        ax1.set_ylim((0, 5))
        plt.grid(b=True)

        plt.sca(ax2)
        plt.xlim([0, len(fit_filenames) - 1])
        plt.ylim([0, 5])
        plt.plot(i, R_median[i], "r.")
        plt.grid(b=True)
        plt.ylabel('Calibration coef. [R/ADCu]')
        plt.xlabel('frame number')
        if i == len(fit_filenames) - 1:
            R_day = np.median(R_median[np.where(R_median > 0)])
            plt.plot([0, 5000], [R_day, R_day], c='b', lw=2)
            plt.title(R_day, loc='right')

        plt.sca(ax)
        plt.pcolormesh(img,
                       cmap="gray",
                       vmin=np.median(img) - 100,
                       vmax=np.median(img) + 100)
        plt.axis('equal')
        plt.plot(X, Y, marker="o", lw=0., mew=mew, mec="b", mfc='none', ms=ms)
        plt.plot(X_filt,
                 Y_filt,
                 marker="o",
                 lw=0.,
                 mew=mew,
                 mec="g",
                 mfc='none',
                 ms=ms)
        plt.plot(X_filt2,
                 Y_filt2,
                 marker="o",
                 lw=0.,
                 mew=mew,
                 mec="r",
                 mfc='none',
                 ms=ms)
        for j in range(len(X_filt2)):
            plt.text(X_filt2[j],
                     Y_filt2[j],
                     str(BS_ID_filt2[j]) + "_" + "{0:4.2f}".format(R[j]),
                     color='w')
        ax.set_xlim((1, 372))
        ax.set_ylim((281, 1))
        plt.title(
            fit_fname.split('/')[-1] + " " + "{0:0>2}".format(date_obs.hour) +
            ":" + "{0:0>2}".format(date_obs.minute) + ":" +
            "{0:0>2}".format(date_obs.second))
        #     plt.show()
        plt.axis('off')
        png_fname = png_prefix + "{0:0>4}".format(i + 1) + ".png"
        #   print(png_fname)
        plt.savefig(png_fname)
        ax.clear()
        ax1.clear()
        ax3.clear()
        ax4.clear()
        ax5.clear()

        fid.write(
            fit_fname.split('/')[-1] + " " + str(R_median[i]) + " " +
            str(R_std[i]) + "\n")

    plt.close()
    sys.stdout.write('\n')
    sys.stdout.flush()
    fid.close()

    return png_prefix, len(fit_filenames), R_median, R_std
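The catalog-to-horizontal transform at the heart of the loop above, in isolation; the obstime and catalog positions below are illustrative, while the site and refraction parameters match the function defaults:

import astropy.units as u
from astropy.time import Time
from astropy.coordinates import AltAz, EarthLocation, SkyCoord

site = EarthLocation(lat=56.1501667 * u.deg, lon=46.1050833 * u.deg, height=183. * u.m)
stars = SkyCoord(ra=[1.2, 2.3] * u.rad, dec=[0.5, 0.9] * u.rad, frame='icrs')
altaz = stars.transform_to(AltAz(obstime=Time('2020-01-01T00:00:00'),
                                 location=site,
                                 temperature=20 * u.deg_C,
                                 pressure=1013 * u.hPa,
                                 relative_humidity=0.5,
                                 obswl=630.0 * u.nm))
print(altaz.az.rad, altaz.alt.rad)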
Example #13
def test_get_new_observer(aia171_test_map):
    initial_obstime = aia171_test_map.date
    rotation_interval = 2 * u.day
    new_time = initial_obstime + rotation_interval
    time_delta = new_time - initial_obstime
    observer = get_earth(initial_obstime + rotation_interval)

    # The observer time is set along with other definitions of time
    for time in (rotation_interval, new_time, time_delta):
        with pytest.raises(ValueError):
            new_observer = _get_new_observer(initial_obstime, observer, time)

    # Obstime property is present but the value is None
    observer_obstime_is_none = SkyCoord(12 * u.deg,
                                        46 * u.deg,
                                        frame=frames.HeliographicStonyhurst)
    with pytest.raises(ValueError):
        new_observer = _get_new_observer(None, observer_obstime_is_none, None)

    # When the observer is set, it gets passed back out
    new_observer = _get_new_observer(initial_obstime, observer, None)
    assert isinstance(new_observer, SkyCoord)
    np.testing.assert_almost_equal(new_observer.transform_to(
        frames.HeliographicStonyhurst).lon.to(u.deg).value,
                                   observer.transform_to(
                                       frames.HeliographicStonyhurst).lon.to(
                                           u.deg).value,
                                   decimal=3)
    np.testing.assert_almost_equal(new_observer.transform_to(
        frames.HeliographicStonyhurst).lat.to(u.deg).value,
                                   observer.transform_to(
                                       frames.HeliographicStonyhurst).lat.to(
                                           u.deg).value,
                                   decimal=3)
    np.testing.assert_almost_equal(
        new_observer.transform_to(frames.HeliographicStonyhurst).radius.to(
            u.au).value,
        observer.transform_to(frames.HeliographicStonyhurst).radius.to(
            u.au).value,
        decimal=3)

    # When the time is set, a coordinate for Earth comes back out
    for time in (rotation_interval, new_time, time_delta):
        with pytest.warns(
                UserWarning,
                match="Using 'time' assumes an Earth-based observer"):
            new_observer = _get_new_observer(initial_obstime, None, time)
        assert isinstance(new_observer, SkyCoord)

        np.testing.assert_almost_equal(
            new_observer.transform_to(frames.HeliographicStonyhurst).lon.to(
                u.deg).value,
            observer.transform_to(frames.HeliographicStonyhurst).lon.to(
                u.deg).value,
            decimal=3)
        np.testing.assert_almost_equal(
            new_observer.transform_to(frames.HeliographicStonyhurst).lat.to(
                u.deg).value,
            observer.transform_to(frames.HeliographicStonyhurst).lat.to(
                u.deg).value,
            decimal=3)
        np.testing.assert_almost_equal(
            new_observer.transform_to(frames.HeliographicStonyhurst).radius.to(
                u.au).value,
            observer.transform_to(frames.HeliographicStonyhurst).radius.to(
                u.au).value,
            decimal=3)

    # The observer and the time cannot both be None
    with pytest.raises(ValueError):
        new_observer = _get_new_observer(initial_obstime, None, None)
def test_reconstruction():
    """
    a test of the complete fit procedure on one event including:
    • tailcut cleaning
    • hillas parametrisation
    • direction fit
    • position fit

    in the end, proper units in the output are asserted """

    filename = get_dataset_path("gamma_test_large.simtel.gz")

    fit = HillasIntersection()

    source = EventSource(filename, max_events=10)
    calib = CameraCalibrator(source.subarray)

    horizon_frame = AltAz()

    reconstructed_events = 0

    for event in source:
        calib(event)

        mc = event.simulation.shower
        array_pointing = SkyCoord(az=mc.az, alt=mc.alt, frame=horizon_frame)

        hillas_dict = {}
        telescope_pointings = {}

        for tel_id, dl1 in event.dl1.tel.items():

            geom = source.subarray.tel[tel_id].camera.geometry

            telescope_pointings[tel_id] = SkyCoord(
                alt=event.pointing.tel[tel_id].altitude,
                az=event.pointing.tel[tel_id].azimuth,
                frame=horizon_frame,
            )

            mask = tailcuts_clean(
                geom, dl1.image, picture_thresh=10.0, boundary_thresh=5.0
            )

            try:
                moments = hillas_parameters(geom[mask], dl1.image[mask])
                hillas_dict[tel_id] = moments
            except HillasParameterizationError as e:
                print(e)
                continue

        if len(hillas_dict) < 2:
            continue
        else:
            reconstructed_events += 1

        # divergent mode is turned on even though the file has parallel pointing.
        fit_result = fit.predict(
            hillas_dict, source.subarray, array_pointing, telescope_pointings
        )

        print(fit_result)
        print(event.simulation.shower.core_x, event.simulation.shower.core_y)
        fit_result.alt.to(u.deg)
        fit_result.az.to(u.deg)
        fit_result.core_x.to(u.m)
        assert fit_result.is_valid

    assert reconstructed_events > 0
def gal2cel_angle(glon,glat,angle,offset=1e-7):
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    origin = SkyCoord(glon,glat,unit=u.deg,frame='galactic')
    return estimate_angle(angle,origin,'fk5',offset)
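estimate_angle() is not shown in the snippet; this is a hypothetical implementation of the small-offset trick the call implies (step a tiny offset along the position angle, transform, and re-measure), not the source's own code:

import astropy.units as u
from astropy.coordinates import SkyCoord

def estimate_angle(angle, origin, new_frame, offset=1e-7):
    # Offset a probe point by `offset` degrees along the position angle,
    # then measure the position angle again in the new frame.
    probe = origin.directional_offset_by(angle * u.deg, offset * u.deg)
    new_origin = origin.transform_to(new_frame)
    return new_origin.position_angle(probe.transform_to(new_frame)).to(u.deg).value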
Example #16
def from_row(cls, row, fov=0.0333, layout=None):
    coords = SkyCoord(row.RA, row.DEC, unit=(u.hourangle, u.deg))
    return cls(coords, row.name, layout=layout)
def cel2gal_angle(ra,dec,angle,offset=1e-7):
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    origin = SkyCoord(ra,dec,unit=u.deg,frame='fk5')
    return estimate_angle(angle,origin,'galactic',offset)
hpc_y = np.arange(-700, 800, 100) * u.arcsec
hpc_x = np.zeros_like(hpc_y)

##############################################################################
# Let's define how many days in the future we want to rotate to.

dt = TimeDelta(4*u.day)
future_date = aia_map.date + dt

##############################################################################
# Now let's plot the original and rotated positions on the AIA map.

fig = plt.figure()
ax = plt.subplot(projection=aia_map)
aia_map.plot(clip_interval=(1, 99.99)*u.percent)
ax.set_title('The effect of {} days of differential rotation'.format(dt.to(u.day).value))
aia_map.draw_grid()

for this_hpc_x, this_hpc_y in zip(hpc_x, hpc_y):
    start_coord = SkyCoord(this_hpc_x, this_hpc_y, frame=aia_map.coordinate_frame)
    rotated_coord = solar_rotate_coordinate(start_coord, time=future_date)
    coord = SkyCoord([start_coord.Tx, rotated_coord.Tx],
                     [start_coord.Ty, rotated_coord.Ty],
                     frame=aia_map.coordinate_frame)
    ax.plot_coord(coord, 'o-')
plt.ylim(0, aia_map.data.shape[1])
plt.xlim(0, aia_map.data.shape[0])

plt.show()
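This gallery-style snippet omits its setup; the imports and sample map it assumes, sketched after sunpy's example gallery (the solar_rotate_coordinate module path varies across sunpy versions):

import matplotlib.pyplot as plt
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import TimeDelta

import sunpy.data.sample
import sunpy.map
from sunpy.physics.differential_rotation import solar_rotate_coordinate

aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)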
Example #19
def json_to_sdss_dlasurvey(json_file, sdss_survey, add_pf=True, debug=False):
    """ Convert JSON output file to a DLASurvey object
    Assumes SDSS bookkeeping for sightlines (i.e. PLATE, FIBER)

    Parameters
    ----------
    json_file : str
      Full path to the JSON results file
    sdss_survey : DLASurvey
      SDSS survey, usually human (e.g. JXP for DR5)
    add_pf : bool, optional
      Add plate/fiber to DLAs in sdss_survey

    Returns
    -------
    ml_survey : LLSSurvey
      Survey object for the LLS

    """
    print("Loading SDSS Survey from JSON file {:s}".format(json_file))
    # imports
    from pyigm.abssys.dla import DLASystem
    from pyigm.abssys.lls import LLSSystem
    # Fiber key
    for fkey in ['FIBER', 'FIBER_ID', 'FIB']:
        if fkey in sdss_survey.sightlines.keys():
            break
    # Read
    ml_results = ltu.loadjson(json_file)
    use_platef = False
    if 'plate' in ml_results[0].keys():
        use_platef = True
    else:
        if 'id' in ml_results[0].keys():
            use_id = True
    # Init
    #idict = dict(plate=[], fiber=[], classification_confidence=[],  # FOR v2
    #             classification=[], ra=[], dec=[])
    idict = dict(ra=[], dec=[])
    if use_platef:
        for key in ['plate', 'fiber', 'mjd']:
            idict[key] = []
    ml_tbl = Table()
    ml_survey = LLSSurvey()
    systems = []
    in_ml = np.array([False]*len(sdss_survey.sightlines))
    # Loop
    for obj in ml_results:
        # Sightline
        for key in idict.keys():
            idict[key].append(obj[key])
        # DLAs
        #if debug:
        #    if (obj['plate'] == 1366) & (obj['fiber'] == 614):
        #        sv_coord = SkyCoord(ra=obj['ra'], dec=obj['dec'], unit='deg')
        #        print("GOT A MATCH IN RESULTS FILE")
        for idla in obj['dlas']:
            """
            dla = DLASystem((sdss_survey.sightlines['RA'][mt[0]],
                             sdss_survey.sightlines['DEC'][mt[0]]),
                            idla['spectrum']/(1215.6701)-1., None,
                            idla['column_density'])
            """
            if idla['z_dla'] < 1.8:
                continue
            isys = LLSSystem((obj['ra'],obj['dec']),
                    idla['z_dla'], None, NHI=idla['column_density'], zem=obj['z_qso'])
            isys.confidence = idla['dla_confidence']
            if use_platef:
                isys.plate = obj['plate']
                isys.fiber = obj['fiber']
            elif use_id:
                plate, fiber = [int(spl) for spl in obj['id'].split('-')]
                isys.plate = plate
                isys.fiber = fiber
            # Save
            systems.append(isys)
    # Connect to sightlines
    ml_coord = SkyCoord(ra=idict['ra'], dec=idict['dec'], unit='deg')
    s_coord = SkyCoord(ra=sdss_survey.sightlines['RA'], dec=sdss_survey.sightlines['DEC'], unit='deg')
    idx, d2d, d3d = match_coordinates_sky(s_coord, ml_coord, nthneighbor=1)
    used = d2d < 1.*u.arcsec
    for iidx in np.where(~used)[0]:
        print("Sightline RA={:g}, DEC={:g} was not used".format(sdss_survey.sightlines['RA'][iidx],
                                                                sdss_survey.sightlines['DEC'][iidx]))
    # Add plate/fiber to statistical DLAs
    if add_pf:
        dla_coord = sdss_survey.coord
        idx2, d2d, d3d = match_coordinates_sky(dla_coord, s_coord, nthneighbor=1)
        if np.min(d2d.to('arcsec').value) > 1.:
            raise ValueError("Bad match to sightlines")
        for jj,igd in enumerate(np.where(sdss_survey.mask)[0]):
            dla = sdss_survey._abs_sys[igd]
            try:
                dla.plate = sdss_survey.sightlines['PLATE'][idx2[jj]]
            except IndexError:
                pdb.set_trace()
            dla.fiber = sdss_survey.sightlines[fkey][idx2[jj]]
    # Finish
    ml_survey._abs_sys = systems
    if debug:
        ml2_coord = ml_survey.coord
        minsep = np.min(sv_coord.separation(ml2_coord))
        minsep2 = np.min(sv_coord.separation(s_coord))
        tmp = sdss_survey.sightlines[used]
        t_coord = SkyCoord(ra=tmp['RA'], dec=tmp['DEC'], unit='deg')
        minsep3 = np.min(sv_coord.separation(t_coord))
        pdb.set_trace()
    ml_survey.sightlines = sdss_survey.sightlines[used]
    for key in idict.keys():
        ml_tbl[key] = idict[key]
    ml_survey.ml_tbl = ml_tbl
    # Return
    return ml_survey
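The core cross-match step used above, in isolation (pure astropy; catalog values are illustrative):

import astropy.units as u
from astropy.coordinates import SkyCoord, match_coordinates_sky

cat1 = SkyCoord(ra=[10.0, 20.0] * u.deg, dec=[1.0, 2.0] * u.deg)
cat2 = SkyCoord(ra=[10.0002, 45.0] * u.deg, dec=[1.0001, -5.0] * u.deg)
idx, d2d, d3d = match_coordinates_sky(cat1, cat2, nthneighbor=1)
used = d2d < 1. * u.arcsec  # same 1-arcsec tolerance as above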
Example #20
def compute_barycentric_correction(ras, decs, times, exps, site="SSO",
                                   disable_auto_max_age=False,
                                   override_iers=False):
    """Compute the barycentric corrections for a set of stars

    In late 2019 issues were encountered accessing online files related to the
    International Earth Rotation and Reference Systems Service. This is 
    required to calculate barycentric corrections for the data. This astropy
    issue may prove a useful resource again if the issue reoccurs:
    - https://github.com/astropy/astropy/issues/8981

    Parameters
    ----------
    ras: string array
        Array of right ascensions in string form: "HH:MM:SS.S".
    
    decs: string array
        Array of declinations in string form: "DD:MM:SS.S".

    times: string/float array
        Array of times in MJD format.

    exps: string/float array
        Array of exposure times in seconds.

    site: string
        The site name to look up its coordinates.

    disable_auto_max_age: boolean, defaults to False
        Useful only when the IERS server is not working.

    override_iers: boolean, defaults to False
        Override the IERS server URL when the default mirror is down or stale.

    Returns
    -------
    bcors: float array
        Array of barycentric corrections in km/s.
    """
    # Get the location
    loc = EarthLocation.of_site(site)

    # Initialise barycentric correction array
    bcors = []

    if disable_auto_max_age:
        #from astropy.utils.iers import IERS_A_URL
        IERS_A_URL = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals.all'
        #from astropy.utils.iers import conf
        #conf.auto_max_age = None
    
    # Override the IERS server if the mirror is down or not up to date
    if override_iers:
        from astropy.utils import iers
        from astropy.utils.iers import conf as iers_conf
        url = "https://datacenter.iers.org/data/9/finals2000A.all"
        iers_conf.iers_auto_url = url
        iers_conf.reload()

    # Calculate the barycentric correction for every star
    for ra, dec, time, exp in zip(tqdm(ras), decs, times, exps):
        sc = SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg))

        # Get the *mid-point* of the observation
        time = Time(float(time), format="mjd") + 0.5*float(exp)*u.second
        barycorr = sc.radial_velocity_correction(obstime=time, location=loc)  
        bcors.append(barycorr.to(u.km/u.s).value)

    return bcors
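The per-star barycentric correction in isolation (a sketch; the coordinates and exposure are illustrative, and the "SSO" site name is resolved by astropy's site registry as in the function default):

import astropy.units as u
from astropy.time import Time
from astropy.coordinates import EarthLocation, SkyCoord

sc = SkyCoord(ra="10:00:00", dec="-30:00:00", unit=(u.hourangle, u.deg))
t = Time(58849.5, format="mjd") + 0.5 * 300 * u.second  # mid-point of a 300 s exposure
loc = EarthLocation.of_site("SSO")
bcor = sc.radial_velocity_correction(obstime=t, location=loc).to(u.km / u.s)
print(bcor)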
Example #21
def test_tpf_from_images():
    """Basic tests of tpf.from_fits_images()"""
    # Not without a wcs...
    with pytest.raises(Exception):
        KeplerTargetPixelFile.from_fits_images(_create_image_array(),
                                               size=(3, 3),
                                               position=SkyCoord(-234.75,
                                                                 8.3393,
                                                                 unit='deg'))

    # Make a fake WCS based on astropy.docs...
    w = wcs.WCS(naxis=2)
    w.wcs.crpix = [-234.75, 8.3393]
    w.wcs.cdelt = np.array([-0.066667, 0.066667])
    w.wcs.crval = [0, -90]
    w.wcs.ctype = ["RA---AIR", "DEC--AIR"]
    w.wcs.set_pv([(2, 1, 45.0)])
    pixcrd = np.array([[0, 0], [24, 38], [45, 98]], np.float_)
    header = w.to_header()
    header['CRVAL1P'] = 10
    header['CRVAL2P'] = 20
    ra, dec = 268.21686048, -73.66991904

    # Now this should work.
    images = _create_image_array(header=header)
    tpf = KeplerTargetPixelFile.from_fits_images(images,
                                                 size=(3, 3),
                                                 position=SkyCoord(
                                                     ra,
                                                     dec,
                                                     unit=(u.deg, u.deg)))
    assert isinstance(tpf, KeplerTargetPixelFile)

    with warnings.catch_warnings():
        # Some cards are too long -- to be investigated.
        warnings.simplefilter("ignore", VerifyWarning)
        # Can we write the output to disk?
        # `delete=False` is necessary below to enable writing to the file on Windows
        # but it means we have to clean up the tmp file ourselves
        tmp = tempfile.NamedTemporaryFile(delete=False)
        try:
            tpf.to_fits(tmp.name)
        finally:
            tmp.close()
            os.remove(tmp.name)

        # Can we read in a list of file names or a list of HDUlists?
        hdus = []
        tmpfile_names = []
        for im in images:
            tmpfile = tempfile.NamedTemporaryFile(delete=False)
            tmpfile_names.append(tmpfile.name)
            hdu = fits.HDUList([fits.PrimaryHDU(), im])
            hdu.writeto(tmpfile.name)
            hdus.append(hdu)

        # Should be able to run with a list of file names
        tpf_tmpfiles = KeplerTargetPixelFile.from_fits_images(
            tmpfile_names,
            size=(3, 3),
            position=SkyCoord(ra, dec, unit=(u.deg, u.deg)))

        # Should be able to run with a list of HDUlists
        tpf_hdus = KeplerTargetPixelFile.from_fits_images(
            hdus, size=(3, 3), position=SkyCoord(ra, dec, unit=(u.deg, u.deg)))

        # Clean up the temporary files we created
        for filename in tmpfile_names:
            try:
                os.remove(filename)
            except PermissionError:
                pass  # This appears to happen on Windows
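As a quick aside (not part of the original test), the hand-built AIR-projection WCS can be sanity-checked with a pixel-to-world round trip; this sketch reuses the `w` and `pixcrd` names defined in the test body above.

# Round trip: pixel -> world -> pixel should reproduce the input coordinates.
world = w.wcs_pix2world(pixcrd, 0)
pix_back = w.wcs_world2pix(world, 0)
np.testing.assert_allclose(pix_back, pixcrd, atol=1e-6)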
Example #22
    def save_packets(self):

        for packet in self._open_avro():
            print(f"working on {packet['objectId']}")

            do_process = True
            if self.only_pure and not self._is_alert_pure(packet):
                do_process = False

            if not do_process:
                print(f"{self.fname}: not pure. Skipping")
                continue

            s = Source.query.filter(Source.id == packet["objectId"]).first()
            if s:
                print("Found: an existing source with id = " +
                      packet["objectId"])
                source_is_varstar = s.varstar is True
                if not self.clobber and s.origin == f"{os.path.basename(self.fname)}":
                    print(
                        f"already added this source with this avro packet {os.path.basename(self.fname)}"
                    )
                    continue

            # make a dataframe and save the source/phot
            dflc = self._make_dataframe(packet)

            source_info = {
                'id': packet["objectId"],
                'ra': packet["candidate"]["ra"],
                'dec': packet["candidate"]["dec"],
                'ra_dis': packet["candidate"]["ra"],
                'dec_dis': packet["candidate"]["dec"],
                'dist_nearest_source': packet["candidate"].get("distnr"),
                'mag_nearest_source': packet["candidate"].get("magnr"),
                'e_mag_nearest_source': packet["candidate"].get("sigmagnr"),
                'sgmag1': packet["candidate"].get("sgmag1"),
                'srmag1': packet["candidate"].get("srmag1"),
                'simag1': packet["candidate"].get("simag1"),
                'objectidps1': packet["candidate"].get("objectidps1"),
                'sgscore1': packet["candidate"].get("sgscore1"),
                'distpsnr1': packet["candidate"].get("distpsnr1"),
                'score': packet['candidate']['rb']
            }

            if s is None:
                s = Source(**source_info,
                           origin=f"{os.path.basename(self.fname)}",
                           groups=[self.ztfpack.g])
                source_is_varstar = False
                new_source = True
            else:
                print("Found an existing source with id = " +
                      packet["objectId"])
                new_source = False

            # let's see if we have already added comments from this packet
            comments = Comment.query.filter(Comment.obj_id == packet["objectId"]) \
                                    .filter(Comment.origin == f"{os.path.basename(self.fname)}")

            skip = False
            if self.clobber:
                if comments.count() > 0:
                    print("removing preexisting comments from this packet")
                    comments.delete()
                    DBSession().commit()
            else:
                if comments.count() > 0:
                    skip = True

            if not skip:
                print(f"packet id: {packet['objectId']}")
                if new_source:
                    s.comments = [Comment(text=comment, obj_id=packet["objectId"],
                                  user=self.ztfpack.group_admin_user,
                                  origin=f"{os.path.basename(self.fname)}")
                                  for comment in ["Added by ztf_upload_avro", \
                                              f"filename = {os.path.basename(self.fname)}"]]
                else:
                    comment_list = [Comment(text=comment, obj_id=packet["objectId"],
                                    user=self.ztfpack.group_admin_user,
                                    origin=f"{os.path.basename(self.fname)}")
                                    for comment in ["Added by ztf_upload_avro", \
                                              f"filename = {os.path.basename(self.fname)}"]]

            photdata = []
            varstarness = []

            ssdistnr = packet["candidate"].get("ssdistnr")

            is_roid = False
            if packet["candidate"].get("isdiffpos", 'f') in ["1", "t"]:
                if not ((ssdistnr is None) or (ssdistnr < 0) or
                        (ssdistnr > 5)):
                    is_roid = True

            for j, row in dflc.iterrows():
                rj = row.to_dict()
                if ((packet["candidate"].get("sgscore1", 1.0) or 1.0) >= 0.5) and \
                   ((packet["candidate"].get("distpsnr1", 10) or 10) < 1.0) or \
                    (rj.get("isdiffpos", 'f') not in ["1", "t"] and \
                     not pd.isnull(rj.get('magpsf'))):
                    if not is_roid:
                        # make sure it's not a roid
                        varstarness.append(True)
                else:
                    varstarness.append(False)

                phot = {
                    "mag": rj.pop('magpsf'),
                    "e_mag": rj.pop("sigmapsf"),
                    "lim_mag": rj.pop('diffmaglim'),
                    "filter": str(rj.pop('fid')),
                    "score": rj.pop("rb"),
                    "candid": rj.pop("candid"),
                    "isdiffpos": rj.pop("isdiffpos") in ["1", "t"],
                    'dist_nearest_source': rj.pop("distnr"),
                    'mag_nearest_source': rj.pop("magnr"),
                    'e_mag_nearest_source': rj.pop("sigmagnr")
                }
                t = Time(rj.pop("jd"), format="jd")
                phot.update({
                    "observed_at": t.iso,
                    "mjd": t.mjd,
                    "time_format": "iso",
                    "time_scale": "utc"
                })

                # calculate the variable star mag
                sign = 1.0 if phot["isdiffpos"] else -1.0
                mref = phot["mag_nearest_source"]
                mref_err = phot["e_mag_nearest_source"]
                mdiff = phot["mag"]
                mdiff_err = phot["e_mag"]

                # Three options here:
                #   diff is detected in the positive (ref source got brighter)
                #   diff is detected in the negative (ref source got fainter)
                #   diff is undetected in either direction (source near ref brightness)
                try:
                    if not pd.isnull(mdiff):
                        total_mag = -2.5 * np.log10(10**(-0.4 * mref) +
                                                    sign * 10**(-0.4 * mdiff))
                        tmp_total_mag_errs = (-2.5*np.log10(10**(-0.4*mref) +
                                              sign*10**(-0.4*(mdiff + mdiff_err))) \
                                              - total_mag,
                                              -2.5*np.log10(10**(-0.4*mref) +
                                              sign*10**(-0.4*(mdiff - mdiff_err))) \
                                              - total_mag)
                        # add errors in quadrature -- geometric mean of diff err
                        # and ref err
                        total_mag_err = np.sqrt(-1.0 * tmp_total_mag_errs[0] *
                                                tmp_total_mag_errs[1] +
                                                mref_err**2)
                    else:
                        # undetected source
                        mref = packet["candidate"].get("magnr")
                        mref_err = packet["candidate"].get("sigmagnr")
                        # 5 sigma
                        diff_err = (-2.5 * np.log10(10**
                                                    (-0.4 * mref) + sign * 10**
                                                    (-0.4 * phot["lim_mag"])) -
                                    mref) / 5

                        total_mag = mref
                        total_mag_err = np.sqrt(mref_err**2 + diff_err**2)
                except Exception:
                    #print("Error in varstar calc")
                    #print(mdiff, mref, sign, mdiff_err, packet["candidate"].get("magnr"), packet["candidate"].get("sigmagnr"))

                    total_mag = 99
                    total_mag_err = 0

                phot.update({"var_mag": total_mag, "var_e_mag": total_mag_err})

                # just keep all the remaining non-nan values for this epoch
                altdata = dict()
                for k in rj:
                    if not pd.isnull(rj[k]): altdata.update({k: rj[k]})

                phot.update({"altdata": altdata})
                photdata.append(copy.copy(phot))

            photometry = Photometry.query.filter(Photometry.obj_id == packet["objectId"]) \
                                         .filter(Photometry.origin == f"{os.path.basename(self.fname)}")

            skip = False
            if self.clobber:
                if photometry.count() > 0:
                    print("removing preexisting photometry from this packet")
                    photometry.delete()
                    DBSession().commit()

            else:
                if photometry.count() > 0:
                    print(
                        "Existing photometry from this packet. Skipping addition of more."
                    )
                    skip = True

            if not skip:
                if new_source:
                    s.photometry = [
                        Photometry(instrument=self.ztfpack.i1,
                                   obj_id=packet["objectId"],
                                   origin=f"{os.path.basename(self.fname)}",
                                   **row) for j, row in enumerate(photdata)
                    ]
                else:
                    phot_list = [
                        Photometry(instrument=self.ztfpack.i1,
                                   obj_id=packet["objectId"],
                                   origin=f"{os.path.basename(self.fname)}",
                                   **row) for j, row in enumerate(photdata)
                    ]

            # s.spectra = []
            source_is_varstar = source_is_varstar or any(varstarness)
            s.varstar = source_is_varstar
            s.is_roid = is_roid
            s.transient = self._is_transient(dflc)

            DBSession().add(s)
            try:
                DBSession().commit()
            except Exception:
                print("error committing DB")

            for ttype, ztftype in [('new', 'Science'), ('ref', 'Template'),
                                   ('sub', 'Difference')]:
                fname = f'{packet["candid"]}_{ttype}.png'
                gzname = f'{packet["candid"]}_{ttype}.fits.gz'

                t = Thumbnail(
                    type=ttype,
                    photometry_id=s.photometry[0].id,
                    file_uri=f'static/thumbnails/{packet["objectId"]}/{fname}',
                    origin=f"{os.path.basename(self.fname)}",
                    public_url=
                    f'/static/thumbnails/{packet["objectId"]}/{fname}')
                tgz = Thumbnail(
                    type=ttype + "_gz",
                    photometry_id=s.photometry[0].id,
                    file_uri=f'static/thumbnails/{packet["objectId"]}/{gzname}',
                    origin=f"{os.path.basename(self.fname)}",
                    public_url=
                    f'/static/thumbnails/{packet["objectId"]}/{gzname}')
                DBSession().add(t)
                stamp = packet['cutout{}'.format(ztftype)]['stampData']

                if (not os.path.exists(self.ztfpack.basedir/f'static/thumbnails/{packet["objectId"]}/{fname}') or \
                    not os.path.exists(self.ztfpack.basedir/f'static/thumbnails/{packet["objectId"]}/{gzname}')) and \
                    not self.clobber:
                    with gzip.open(io.BytesIO(stamp), 'rb') as f:
                        gz = open(f"/tmp/{gzname}", "wb")
                        gz.write(f.read())
                        gz.close()
                        f.seek(0)
                        with fits.open(io.BytesIO(f.read())) as hdul:
                            hdul[0].data = np.flip(hdul[0].data, axis=0)
                            ffig = aplpy.FITSFigure(hdul[0])
                            ffig.show_grayscale(
                                stretch='arcsinh',
                                invert=True)  #ztftype != 'Difference')
                            ffig.save(f"/tmp/{fname}")
                    if not os.path.exists(
                            self.ztfpack.basedir /
                            f'static/thumbnails/{packet["objectId"]}'):
                        os.makedirs(self.ztfpack.basedir /
                                    f'static/thumbnails/{packet["objectId"]}')
                    shutil.copy(
                        f"/tmp/{fname}", self.ztfpack.basedir /
                        f'static/thumbnails/{packet["objectId"]}/{fname}')
                    shutil.copy(
                        f"/tmp/{gzname}", self.ztfpack.basedir /
                        f'static/thumbnails/{packet["objectId"]}/{gzname}')

            try:
                s.add_linked_thumbnails()
            except Exception:
                print("Not linking thumbnails...not on the 'net?")

            # grab the photometry for this source and update relevant quantities

            # ra, dec update
            dat = pd.read_sql(
                DBSession().query(Photometry).filter(
                    Photometry.obj_id == packet["objectId"]).filter(
                        Photometry.mag < 30).statement,
                DBSession().bind)
            if not s.varstar:
                infos = [(x["altdata"]["ra"], x["altdata"]["dec"], x["mag"],
                          x["e_mag"], x["score"], x["filter"])
                         for i, x in dat.iterrows()]
            else:
                infos = [
                    (x["altdata"]["ra"], x["altdata"]["dec"], x["var_mag"],
                     x["var_e_mag"], x["score"], x["filter"])
                    for i, x in dat.iterrows()
                ]

            ndet = len(dat[~pd.isnull(dat["mag"])])
            s.detect_photometry_count = ndet
            s.last_detected = np.max(
                dat[~pd.isnull(dat["mag"])]["observed_at"])

            calc_source_data = dict()
            new_ra = np.average([x[0] for x in infos],
                                weights=[1. / x[3] for x in infos])
            new_dec = np.average([x[1] for x in infos],
                                 weights=[1. / x[3] for x in infos])
            ra_err = np.std([x[0] for x in infos])
            dec_err = np.std([x[1] for x in infos])

            calc_source_data.update(
                {"min_score": np.nanmin([x[4] for x in infos])})
            calc_source_data.update(
                {"max_score": np.nanmax([x[4] for x in infos])})

            filts = list(set([x[-1] for x in infos]))
            for f in filts:
                ii = [x for x in infos if x[-1] == f]
                rez = np.average([x[2] for x in ii],
                                 weights=[1 / x[3] for x in ii])
                if pd.isnull(rez):
                    rez = None
                md = np.nanmax([x[2]
                                for x in ii]) - np.nanmin([x[2] for x in ii])
                max_delta = md if not pd.isnull(md) else None

                calc_source_data.update(
                    {f: {
                        "max_delta": max_delta,
                        "mag_avg": rez
                    }})

            s = Source.query.get(packet["objectId"])

            altdata = dict()
            for k in calc_source_data:
                if not pd.isnull(calc_source_data[k]):
                    altdata.update({k: calc_source_data[k]})

            s.altdata = altdata
            s.ra = new_ra
            s.dec = new_dec
            s.ra_err = ra_err
            s.dec_err = dec_err

            c1 = SkyCoord(s.ra_dis * u.deg, s.dec_dis * u.deg, frame='fk5')
            c2 = SkyCoord(new_ra * u.deg, new_dec * u.deg, frame='fk5')
            sep = c1.separation(c2)
            s.offset = sep.arcsecond if not pd.isnull(sep.arcsecond) else 0.0

            # TNS
            tns = self._tns_search(s.ra_dis, s.dec_dis)
            s.tns_info = tns
            if tns["Name"]:
                s.tns_name = tns["Name"]

            # catalog search
            result_table = customSimbad.query_region(
                SkyCoord(f"{s.ra_dis}d {s.dec_dis}d", frame='icrs'),
                radius='0d0m3s')
            if result_table:
                try:
                    s.simbad_class = result_table["OTYPE"][0].decode(
                        "utf-8", "ignore")

                    rj = result_table.to_pandas().dropna(
                        axis='columns').iloc[0].to_json()
                    s.simbad_info = rj
                except Exception:
                    pass

            if s.simbad_class:
                comments = [
                    Comment(text=comment,
                            obj_id=packet["objectId"],
                            user=self.ztfpack.group_admin_user,
                            ctype="classification",
                            origin=f"{os.path.basename(self.fname)}")
                    for comment in [f"Simbad class = {s.simbad_class}"]
                ]

            result_table = customGaia.query_region(
                SkyCoord(ra=s.ra_dis, dec=s.dec_dis,
                         unit=(u.deg, u.deg), frame='icrs'),
                width="3s",
                catalog=["I/345/gaia2"])
            if result_table:
                try:
                    rj = result_table.pop().to_pandas().dropna(
                        axis='columns').iloc[0].to_json()
                    s.gaia_info = rj
                except Exception:
                    pass

            DBSession().commit()
            print("added")
Example #23
def non_helioprojective_skycoord():
    return SkyCoord(0 * u.rad, 0 * u.rad, frame="icrs")
Example #24
import astropy.units as u
import numpy as np
from astropy.coordinates import SkyCoord

import sunpy.map
from sunpy.coordinates import frames

################################################################################
# First we will create a blank map from an array of NaNs.
# Since there is no WCS information, we will need to construct a header to pass to Map.

data = np.full((10, 10), np.nan)

# Define a reference coordinate and create a header using sunpy.map.make_fitswcs_header
skycoord = SkyCoord(0 * u.arcsec,
                    0 * u.arcsec,
                    obstime='2013-10-28',
                    observer='earth',
                    frame=frames.Helioprojective)

# The scale is set so that the solar limb fits in the field of view
header = sunpy.map.make_fitswcs_header(data,
                                       skycoord,
                                       scale=[220, 220] * u.arcsec / u.pixel)

# Use sunpy.map.Map to create the blank map
blank_map = sunpy.map.Map(data, header)

################################################################################
# Now that we have constructed the map, we can plot it and mark important locations on it.
# Initialize the plot and add the map to it
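The plotting step promised by the last comment is cut off in this excerpt; a minimal sketch of what it could look like, assuming matplotlib, is:

import matplotlib.pyplot as plt

# Plot the blank map on WCS-aware axes and mark the solar limb.
fig = plt.figure()
ax = fig.add_subplot(projection=blank_map)
blank_map.plot(axes=ax)
blank_map.draw_limb(axes=ax, color="k")
plt.show()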
Example #25
 star.rstar = dat["r"][i]*const.R_sun
 star.d = dat["dpc"][i]*u.pc  # pc

 exoplanet = planet.PlanetClass()
 exoplanet.rplanet = 1.0*const.R_earth  # Re
 exoplanet.a = float(dat["amin"][i])*u.AU  # AU
 exoplanet.albedo = 0.3
 exoplanet.phase = np.pi/2.0
 exoplanet.compute_reflectivity()
 print("Star-Planet Contrast =", exoplanet.reflectivity)
 contrast = exoplanet.reflectivity

 ra = str(dat["ra"][i])
 dec = str(dat["dec"][i])
 radec = ra + " " + dec
 c = SkyCoord(radec, unit=(u.hourangle, u.deg))
 decdeg = c.dec.degree
 amin = dat["amin"][i]
 d = dat["dpc"][i]
 sep = float(amin)/float(d)
 if decdeg > -40:
     col = "C1"
 else:
     col = "gray"
 plt.plot([sep], [contrast], "o", color=col)

 if str(dat["propername"][i]) == "nan":
     plt.text(sep, contrast, dat["name"][i], color=col, alpha=0.5, fontsize=13)
 else:
     plt.text(sep, contrast, dat["propername"][i], color=col, alpha=0.5, fontsize=13)
 print("OK:", dat["name"][i])
Example #26
def run_flux_sensitivity(**kwargs):

    index = kwargs.get('index', 2.0)
    sedshape = kwargs.get('sedshape', 'PowerLaw')
    cutoff = kwargs.get('cutoff', 1e3)
    curvindex = kwargs.get('curvindex', 1.0)
    beta = kwargs.get('beta', 0.0)
    dmmass = kwargs.get('DMmass', 100.0)
    dmchannel = kwargs.get('DMchannel', 'bb')
    emin = kwargs.get('emin', 10**1.5)
    emax = kwargs.get('emax', 10**6.0)
    nbin = kwargs.get('nbin', 18)
    glon = kwargs.get('glon', 0.0)
    glat = kwargs.get('glat', 0.0)
    ltcube_filepath = kwargs.get('ltcube', None)
    galdiff_filepath = kwargs.get('galdiff', None)
    isodiff_filepath = kwargs.get('isodiff', None)
    galdiff_fit_filepath = kwargs.get('galdiff_fit', None)
    isodiff_fit_filepath = kwargs.get('isodiff_fit', None)
    wcs_npix = kwargs.get('wcs_npix', 40)
    wcs_cdelt = kwargs.get('wcs_cdelt', 0.5)
    wcs_proj = kwargs.get('wcs_proj', 'AIT')
    map_type = kwargs.get('map_type', None)
    spatial_model = kwargs.get('spatial_model', 'PointSource')
    spatial_size = kwargs.get('spatial_size', 1E-2)

    obs_time_yr = kwargs.get('obs_time_yr', None)
    event_class = kwargs.get('event_class', 'P8R2_SOURCE_V6')
    min_counts = kwargs.get('min_counts', 3.0)
    ts_thresh = kwargs.get('ts_thresh', 25.0)
    nside = kwargs.get('hpx_nside', 16)
    output = kwargs.get('output', None)

    event_types = [['FRONT', 'BACK']]

    if sedshape == 'PowerLaw':
        fn = spectrum.PowerLaw([1E-13, -index], scale=1E3)
    elif sedshape == 'PLSuperExpCutoff':
        fn = spectrum.PLSuperExpCutoff([1E-13, -index, cutoff, curvindex],
                                       scale=1E3)
    elif sedshape == 'LogParabola':
        fn = spectrum.LogParabola([1E-13, -index, beta], scale=1E3)
    elif sedshape == 'DM':
        fn = spectrum.DMFitFunction([1E-26, dmmass], chan=dmchannel)

    log_ebins = np.linspace(np.log10(emin), np.log10(emax), nbin + 1)
    ebins = 10**log_ebins
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    c = SkyCoord(glon, glat, unit='deg', frame='galactic')

    if ltcube_filepath is None:

        if obs_time_yr is None:
            raise Exception('No observation time defined.')

        ltc = LTCube.create_from_obs_time(obs_time_yr * 365 * 24 * 3600.)
    else:
        ltc = LTCube.create(ltcube_filepath)
        if obs_time_yr is not None:
            ltc._counts *= obs_time_yr * 365 * \
                24 * 3600. / (ltc.tstop - ltc.tstart)

    gdiff = skymap.Map.create_from_fits(galdiff_filepath)
    gdiff_fit = None
    if galdiff_fit_filepath is not None:
        gdiff_fit = skymap.Map.create_from_fits(galdiff_fit_filepath)

    if isodiff_filepath is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt' % event_class,
                                          search_dirs=['$FERMI_DIFFUSE_DIR'])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = isodiff_filepath

    iso = np.loadtxt(isodiff, unpack=True)
    iso_fit = None
    if isodiff_fit_filepath is not None:
        iso_fit = np.loadtxt(isodiff_fit_filepath, unpack=True)

    scalc = SensitivityCalc(gdiff,
                            iso,
                            ltc,
                            ebins,
                            event_class,
                            event_types,
                            gdiff_fit=gdiff_fit,
                            iso_fit=iso_fit,
                            spatial_model=spatial_model,
                            spatial_size=spatial_size)

    # Compute Maps
    map_diff_flux = None
    map_diff_npred = None
    map_int_flux = None
    map_int_npred = None

    map_nstep = 500

    if map_type == 'hpx':

        hpx = HPX(nside, True, 'GAL', ebins=ebins)
        map_diff_flux = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_diff_npred = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_skydir = map_diff_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.diff_flux_threshold(map_skydir[s], fn, ts_thresh,
                                          min_counts)
            map_diff_flux.data[:, s] = o['flux'].T
            map_diff_npred.data[:, s] = o['npred'].T

        hpx = HPX(nside, True, 'GAL')
        map_int_flux = HpxMap(np.zeros((hpx.npix)), hpx)
        map_int_npred = HpxMap(np.zeros((hpx.npix)), hpx)
        map_skydir = map_int_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.int_flux_threshold(map_skydir[s], fn, ts_thresh,
                                         min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    elif map_type == 'wcs':

        wcs_shape = [wcs_npix, wcs_npix]
        wcs_size = wcs_npix * wcs_npix

        map_diff_flux = Map.create(c,
                                   wcs_cdelt,
                                   wcs_shape,
                                   'GAL',
                                   wcs_proj,
                                   ebins=ebins)
        map_diff_npred = Map.create(c,
                                    wcs_cdelt,
                                    wcs_shape,
                                    'GAL',
                                    wcs_proj,
                                    ebins=ebins)
        map_skydir = map_diff_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (slice(None), idx[1], idx[0])
            o = scalc.diff_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                          fn, ts_thresh, min_counts)
            map_diff_flux.data[s] = o['flux'].T
            map_diff_npred.data[s] = o['npred'].T

        map_int_flux = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_int_npred = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_skydir = map_int_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (idx[1], idx[0])
            o = scalc.int_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                         fn, ts_thresh, min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    o = scalc.diff_flux_threshold(c, fn, ts_thresh, min_counts)

    cols = [
        Column(name='e_min', dtype='f8', data=scalc.ebins[:-1], unit='MeV'),
        Column(name='e_ref', dtype='f8', data=o['e_ref'], unit='MeV'),
        Column(name='e_max', dtype='f8', data=scalc.ebins[1:], unit='MeV'),
        Column(name='flux', dtype='f8', data=o['flux'], unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', data=o['eflux'],
               unit='MeV / (cm2 s)'),
        Column(name='dnde',
               dtype='f8',
               data=o['dnde'],
               unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde',
               dtype='f8',
               data=o['e2dnde'],
               unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', data=o['npred'], unit='ph')
    ]

    tab_diff = Table(cols)

    cols = [
        Column(name='index', dtype='f8'),
        Column(name='e_min', dtype='f8', unit='MeV'),
        Column(name='e_ref', dtype='f8', unit='MeV'),
        Column(name='e_max', dtype='f8', unit='MeV'),
        Column(name='flux', dtype='f8', unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='dnde', dtype='f8', unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', unit='ph'),
        Column(name='ebin_e_min', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_ref', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_max', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_flux',
               dtype='f8',
               unit='ph / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_eflux',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_dnde',
               dtype='f8',
               unit='ph / (MeV cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_e2dnde',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_npred', dtype='f8', unit='ph', shape=(len(ectr), ))
    ]

    cols_ebounds = [
        Column(name='E_MIN', dtype='f8', unit='MeV', data=ebins[:-1]),
        Column(name='E_MAX', dtype='f8', unit='MeV', data=ebins[1:]),
    ]

    tab_int = Table(cols)
    tab_ebounds = Table(cols_ebounds)

    index = np.linspace(1.0, 5.0, 4 * 4 + 1)

    for g in index:
        fn = spectrum.PowerLaw([1E-13, -g], scale=10**3.5)
        o = scalc.int_flux_threshold(c, fn, ts_thresh, 3.0)
        row = [g]
        for colname in tab_int.columns:
            if colname == 'index':
                continue
            if 'ebin' in colname:
                row += [o['bins'][colname.replace('ebin_', '')]]
            else:
                row += [o[colname]]

        tab_int.add_row(row)

    hdulist = fits.HDUList()
    hdulist.append(fits.table_to_hdu(tab_diff))
    hdulist.append(fits.table_to_hdu(tab_int))
    hdulist.append(fits.table_to_hdu(tab_ebounds))

    hdulist[1].name = 'DIFF_FLUX'
    hdulist[2].name = 'INT_FLUX'
    hdulist[3].name = 'EBOUNDS'

    if map_type is not None:
        hdu = map_diff_flux.create_image_hdu()
        hdu.name = 'MAP_DIFF_FLUX'
        hdulist.append(hdu)
        hdu = map_diff_npred.create_image_hdu()
        hdu.name = 'MAP_DIFF_NPRED'
        hdulist.append(hdu)

        hdu = map_int_flux.create_image_hdu()
        hdu.name = 'MAP_INT_FLUX'
        hdulist.append(hdu)
        hdu = map_int_npred.create_image_hdu()
        hdu.name = 'MAP_INT_NPRED'
        hdulist.append(hdu)

    hdulist.writeto(output, overwrite=True)
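A hedged invocation sketch follows; every keyword below appears in the function's `kwargs.get` calls, but the diffuse-model path and output filename are placeholders.

run_flux_sensitivity(
    sedshape='PowerLaw',
    index=2.0,
    obs_time_yr=10.0,                # build the livetime cube from an observation time
    galdiff='gll_iem_v06.fits',      # placeholder galactic diffuse model path
    map_type='hpx',
    hpx_nside=16,
    output='flux_sensitivity.fits',  # placeholder output FITS name
)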
Example #27
def region():
    center = SkyCoord("0 deg", "0 deg", frame="galactic")
    return CircleSkyRegion(center=center, radius=1 * u.deg)
Example #28
    peccObs.to_csv(os.path.join('.', 'pecc', 'obs-OBC-ecc-p.csv'),
                   header=['e', 'p'])
    peccRec.to_csv(os.path.join('.', 'pecc', 'rec-OBC-ecc-p.csv'),
                   header=['e', 'p'])

    #plot and save the histograms
    saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')
    saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist')
    saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist')
    saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist')
    saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')
    saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist')
    saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist')

    #make the mollweide
    coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree), frame='icrs')
    lGal = coords.galactic.l.wrap_at(180. * units.degree).degree
    bGal = coords.galactic.b.wrap_at(180. * units.degree).degree
    RAwrap = coords.ra.wrap_at(180. * units.degree).degree
    Decwrap = coords.dec.wrap_at(180. * units.degree).degree

    plotPath = os.path.join('.', 'plots')
    f, ax = plt.subplots(subplot_kw={'projection': "mollweide"},
                         figsize=(8, 5))
    ax.grid(True)
    #ax.set_xlabel(r"$l$",fontsize=16)
    #ax.set_ylabel(r"$b$",fontsize=16)
    #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4)
    ax.set_xlabel("RA", fontsize=16)
    ax.set_ylabel("Dec", fontsize=16)
    mlw = ax.scatter(np.array(RAwrap).ravel() * np.pi / 180.,
                     np.array(Decwrap).ravel() * np.pi / 180.)  # y-values inferred from context; styling kwargs were truncated in this excerpt
Example #29
import argparse

import astropy.units as u
from astropy.coordinates import AltAz, EarthLocation, SkyCoord
from astropy.time import Time

parser = argparse.ArgumentParser(
    description='Convert RA and DEC to Alt and Az')

parser.add_argument("-R", "--input_RA", help="Input as: XXhYYmZZ.ZZZs")
parser.add_argument("-D", "--input_DEC", help="Input as: XXdYYmZZ.ZZZs")
parser.add_argument("-t", "--time", help="time for conversion to alt,az, MJD")

args = parser.parse_args()
RA = args.input_RA
DEC = args.input_DEC
time_MJD = args.time

#I-LOFAR
lat = 53.09469
lon = -7.92153
z = 75
#LOFAR-SE
#lat = 57.39885
#lon = 11.93029
#z = 18

target_coord = SkyCoord(RA, DEC, frame='icrs')
observer_coord = EarthLocation(lat=lat * u.deg,
                               lon=lon * u.deg,
                               height=z * u.m)
time = Time(time_MJD, format='mjd')

altaz = target_coord.transform_to(AltAz(obstime=time, location=observer_coord))
print(altaz)
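An example invocation of this converter (the script name is an assumption; the coordinates are roughly the Crab pulsar and the MJD is arbitrary):

# python radec_to_altaz.py -R 05h34m31.94s -D 22d00m52.2s -t 58849.0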
Example #30
def filter_aliases(table, week=None):
    """
    Remove the few pesky sources that are known to be aliases of other sources
    """
    if week is None:
        print("no week supplied - not filtering aliases")
        return table
    print("filtering aliased sources")
    # read lines from files
    if week == 3:
        f = "Week3_CenA_ghosts.reg"
    elif week == 4:
        f = "Week4_HerA_ghost.reg"
    else:
        print "there are no known aliased sources for week ", week
        return table
    if not os.path.exists(f):
        if os.path.exists(mwa_code_base +
                          "/MWA_Tools/gleam_scripts/mosaics/scripts/" + f):
            f = mwa_code_base + "/MWA_Tools/gleam_scripts/mosaics/scripts/" + f
        else:
            print "cannot find ", f
            sys.exit(1)
    lines = (a for a in open(f).readlines() if a.startswith('ellipse'))
    # convert lines into ra,dec,a,b,pa
    words = [re.split(r'[(,\s)]', line) for line in lines]
    pos = [SkyCoord(w[1], w[2], unit=(u.hourangle, u.degree)) for w in words]
    ra = np.array([p.ra.degree for p in pos])
    dec = np.array([p.dec.degree for p in pos])
    shape = [[float(x.replace('"', '')) for x in w[3:6]] for w in words]
    a, b, pa = map(np.array, zip(*shape))
    # convert from ds9 to 'true' position angle
    pa += 90
    a /= 3600.
    b /= 3600.
    # all params are now in degrees
    kill_list = []
    # loops for now
    for i in range(len(ra)):
        # define a box that is larger than needed
        dmin = dec[i] - 3 * a[i]
        dmax = dec[i] + 3 * a[i]
        rmin = ra[i] - 3 * a[i]
        rmax = ra[i] + 3 * a[i]
        # Select all sources within this box
        mask = np.where((table['dec'] < dmax) & (table['dec'] > dmin)
                        & (table['ra'] > rmin) & (table['ra'] < rmax))[0]
        if len(mask) < 1:
            continue
        # create a catalog of this subset
        cat = SkyCoord(table[mask]['ra'],
                       table[mask]['dec'],
                       unit=(u.degree, u.degree))
        # define our reference position and find all sources that are within the error ellipse
        p = pos[i]
        # yay for vectorized functions in astropy
        offset = p.separation(cat).degree
        pa_off = p.position_angle(cat).radian
        pa_diff = pa_off - np.radians(pa[i])
        radius = a[i] * b[i] / np.sqrt((b[i] * np.cos(pa_diff))**2 +
                                       (a[i] * np.sin(pa_diff))**2)
        # print 'radius', radius
        # print 'offset', offset
        to_remove = np.where(radius >= offset)[0]
        # print 'sources within box', mask
        # print 'marked for removal',to_remove,
        # print mask[to_remove]
        if len(to_remove) < 1:
            continue
        # save the index of the sources that are within the error ellipse
        kill_list.extend(mask[to_remove])
    print "table has ", len(table), "sources"
    print "there are ", len(kill_list), "sources to remove"
    table.remove_rows(kill_list)
    print "there are now ", len(table), "sources left in the table"
    return table
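A hedged usage sketch for this filter; the catalogue filename is a placeholder, and the table is assumed to carry lowercase 'ra'/'dec' columns in degrees, as the function expects.

from astropy.table import Table

tab = Table.read('week3_catalogue.fits')  # placeholder input catalogue
tab = filter_aliases(tab, week=3)
tab.write('week3_catalogue_filtered.fits', overwrite=True)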