def read_data(self, f_min, f_max, force_read=False):
    self.uv = pyuvdata.UVData()
    if os.path.exists(self.data_dir + self.autos_file) and not force_read:
        self.uv.read(self.data_dir + self.autos_file)
        holder_freq_array_1 = self.uv.freq_array <= f_max * 1e6
        holder_freq_array_2 = self.uv.freq_array >= f_min * 1e6
        holder_freq_array = np.full(holder_freq_array_1.shape, True, dtype=bool)
        holder_freq_array[holder_freq_array_1 == False] = False
        holder_freq_array[holder_freq_array_2 == False] = False
        self.uv.read(self.data_dir + self.autos_file, freq_chans=holder_freq_array)
    else:
        file_lists = np.sort(glob.glob(self.data_dir + self.filestart + self.fileend))
        uv_temp = pyuvdata.UVData()
        for i in range(len(file_lists)):
            print(file_lists[i])
            if i == 0:
                self.uv.read([file_lists[i]], ant_str='auto')
                # self.uv.select(time_range=)
            else:
                uv_temp.read([file_lists[i]], ant_str='auto')
                # uv_temp.select(time_range=[])
                self.uv += uv_temp
        self.uv.write_uvh5(self.data_dir + self.autos_file)

    # Get some useful parameters
    self.lsts, ind = np.unique(self.uv.lst_array, return_index=True)
    order = np.argsort(ind)
    self.lsts = 24 * self.lsts[order] / (2 * np.pi)
    self.wrap = np.argmax(self.lsts)
    self.freqs = self.uv.freq_array.flatten() * 1e-6
    self.ants = self.uv.get_ants()
def generate_residual_IDR2_2(uvh5_file, omni_vis, omni_calfits, abs_calfits,
                             outfile, clobber=False):
    # reading uvh5 data file
    hd = HERAData(uvh5_file)
    data, flags, nsamples = hd.read(polarizations=['ee', 'nn'])

    # reading omnical model visibilities
    hd_oc = HERAData(omni_vis)
    omnivis, omnivis_flags, _ = hd_oc.read()

    uvo = pyuvdata.UVData()
    uvo.read_uvh5(omni_vis)

    # reading calfits file
    hc = HERACal(omni_calfits)
    oc_gains, oc_flags, oc_quals, oc_total_quals = hc.read()

    hc = HERACal(abs_calfits)
    ac_gains, ac_flags, ac_quals, ac_total_quals = hc.read()

    # calibrating the data
    abscal_data, abscal_flags = copy.deepcopy(data), copy.deepcopy(flags)
    calibrate_in_place(abscal_data, ac_gains, data_flags=abscal_flags,
                       cal_flags=ac_flags)

    res_data, res_flags = copy.deepcopy(hd.data_array), copy.deepcopy(hd.flag_array)
    resdata, resflags = copy.deepcopy(abscal_data), copy.deepcopy(abscal_flags)

    for i, p in enumerate(['ee', 'nn']):
        # reading omnical model visibilities
        hd_oc = HERAData(omni_vis)
        omnivis, omnivis_flags, _ = hd_oc.read(polarizations=[p])

        mod_bls = list(omnivis.keys())
        red_bls = get_reds(hd.antpos, pols=p)
        red = gr.RBL(red_bls)
        for mbl in mod_bls:
            bl_grp = red[tuple(mbl[0:2]) + ('J{}'.format(p),)]
            for blp in bl_grp:
                bl = (blp[0], blp[1], p)
                inds = hd.antpair2ind(bl)
                omnivis_scaled = (omnivis[mbl]
                                  * oc_gains[(blp[0], 'J{}'.format(p))]
                                  * np.conj(oc_gains[(blp[1], 'J{}'.format(p))]))
                omnivis_scaled /= (ac_gains[(blp[0], 'J{}'.format(p))]
                                   * np.conj(ac_gains[(blp[1], 'J{}'.format(p))]))
                resdata[bl] = abscal_data[bl] - omnivis_scaled
                resflags[bl] = abscal_flags[bl]
                res_data[inds, 0, :, i] = resdata[bl]
                res_flags[inds, 0, :, i] = resflags[bl]

    # writing to file
    hd.data_array = res_data
    hd.flag_array = res_flags
    hd.write_uvh5(outfile, clobber=clobber)
def genclosurephase(fin, **kwargs):
    print(fin)
    hh = c.hf(genclosurephase, fin)
    mm = repo.get(hh)
    print(mm)
    if mm:
        c.trc("[Cached]", "makemsfile", fin)
    else:
        c.trc("[Eval] ", "makemsfile", fin, " -> ", hh)
        UV = pyuvdata.UVData()
        UV.read_miriad(fin)
        UV.phase_to_time(Time(UV.time_array[0], format='jd', scale='utc'))
        tempf = repo.mktemp()
        os.remove(tempf)
        UV.write_uvfits(tempf, spoof_nonessential=True)
        if not os.path.exists(tempf):
            raise RuntimeError("No output produced by mkuvfits!")
        foms = c.importuvfits(tempf)
        os.remove(tempf)
        flms = c.flagdata(foms, autocorr=True)
        mm = repo.put(flms, hh)
    fout = os.path.split(fin)[-1] + ".npz"
    r = hc.closurePh(mm, trlist=inTriads, alist=inAntenna)
    np.savez(fout, **r)
    if not os.path.exists(fout):
        raise RuntimeError("No output produced by hc.closurePh !")
    return fout
def load_pyuvdata(self, filename, chtypes, fold_factor, psize):
    uv = pyuvdata.UVData()
    uv.read_miriad(filename)
    self.uv = copy(uv)
    self.antpairs = copy(uv.get_antpairs())
    self.dset_size = np.shape(self.uv.data_array)[0] / 60
    self.chtypes = chtypes
    self.fold_factor = fold_factor  # 16
    self.psize = psize  # 68
def mkuvfits(file, outputdir):
    head, tail = os.path.split(file)
    if not os.path.exists(file + "fits"):
        UV = pyuvdata.UVData()
        UV.read_miriad(file, 'miriad')
        UV.phase_to_time(UV.time_array[0])
        UV.write_uvfits(outputdir + tail + "fits", 'uvfits')
    return file + "fits"
def mkuvfits(fin, fout):
    UV = pyuvdata.UVData()
    # Read file and rewrap phases.
    UV.read_miriad(fin, 'miriad')
    UV.phase_to_time(UV.time_array[0])
    UV.write_uvfits(fout, 'uvfits')
    # Output to .ms directory
    casa.importuvfits(fout, fout + '.ms')
def loadPYUVdata(file_locs, suffix):
    files = np.array(glob(file_locs + '*' + suffix))
    info = {}
    ct = 0
    rnd_ = np.random.randint(len(files), size=5)
    file_cut = files[rnd_]
    for f in file_cut:
        print(f)
        uv = pyuvdata.UVData()
        uv.read_miriad(f, run_check_acceptability=False, run_check=False,
                       check_extra=False)
        antpairs = uv.get_antpairs()
        flag_npz_name = '.'.join(f.split('.')[:5]) + '.uvOC.flags.npz'
        try:
            flag_npz = np.load(flag_npz_name)
        except:  # continue
            print('Skipping because npz flag file -{}- not found'.format(flag_npz_name))
        for i in range(10):
            rnd = np.random.randint(len(antpairs))
            ap1, ap2 = antpairs[rnd]
            bsl = uv.antnums_to_baseline(ap1, ap2)
            try:
                HERAlabels_ = np.logical_not(
                    flag_npz['flag_array'][bsl == flag_npz['baseline_array']])
            except:
                HERAlabels_ = 0.  # pass
            pos1 = uv.antenna_positions[uv.antenna_numbers == ap1]
            pos2 = uv.antenna_positions[uv.antenna_numbers == ap2]
            bl_len = np.round(np.sqrt(np.sum((pos1 - pos2)**2)), 2)
            info[f + '_{0}'.format(ct)] = '{0}_blen_{1}'.format(ct, bl_len)
            # str(ct)+'_blen_'+str()  # ['antpairs'][ct] = antpairs[rnd]  # antpairs[rnd] : ct
            if f == file_cut[0] and i == 0:
                HERAdata = [uv.get_data(antpairs[rnd]).squeeze()]
                try:
                    HERAlabels = [HERAlabels_.squeeze()]
                except:
                    HERAlabels = []
            else:
                HERAdata.append(uv.get_data(antpairs[rnd]).squeeze())
                try:
                    HERAlabels.append(HERAlabels_.squeeze())
                except:
                    HERAlabels = []
            ct += 1
        del uv
    print('Dataset size: ', np.shape(HERAdata))
    if np.ndim(HERAlabels) > 1:
        HERAlabels = np.zeros_like(HERAdata).real
    return HERAdata, HERAlabels, info
def main():
    command = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''
        ---------------------------------------------------------------------------------------
        HERA Array Topology Viewer

        Author: James Kent
        Institution: University of Cambridge, 2018
        Email: [email protected]

        Takes a HERA miriad file and plots the array topology.
        Takes x,y locations to plot top-down view of the array,
        with antenna numbers.
        '''))
    command.add_argument('filepath', help=('miriad file from HERA'))
    args = command.parse_args()

    UV = pyuvdata.UVData()
    UV.read_miriad(args.filepath)
    ant_locs = UV.antenna_positions
    ant_nums = UV.antenna_numbers
    ant_x = ant_locs[:, 0]
    ant_y = ant_locs[:, 1]

    current_rot_angle = numpy.arctan2(ant_y[-1], ant_x[-1])
    print("Current Rotation: ")
    print(current_rot_angle)

    # Rotate antenna positions
    angle_rot = -current_rot_angle
    cost = numpy.cos(angle_rot)
    sint = numpy.sin(angle_rot)
    xcost = ant_x * cost
    xsint = ant_x * sint
    ysint = ant_y * sint
    ycost = ant_y * cost
    x_rot = xcost - ysint
    y_rot = xsint + ycost

    fig, ax = plt.subplots(figsize=(8, 8))
    ax.scatter(x_rot, y_rot)
    for i, label in enumerate(ant_nums):
        ax.annotate(label, (x_rot[i], y_rot[i]))
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    plt.savefig("hera_array.pdf")
    plt.show()
def read_uvfile(uvfile):
    """
    Reads in a Miriad file and returns a UVData object containing the
    observed visibilities and the corresponding flags.

    Parameters
    ----------
    uvfile : string
        Name of the input uvfile containing the visibilities and
        corresponding metadata.
    """
    uvd = pyuvdata.UVData()
    uvd.read_miriad(uvfile)
    return uvd
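
# Hypothetical usage sketch for read_uvfile above; the Miriad path is an
# assumed placeholder, not a file referenced elsewhere in this code.
uvd = read_uvfile('zen.2458098.12345.xx.HH.uv')
print(uvd.Nbls, uvd.Nfreqs, uvd.Ntimes)
ap = uvd.get_antpairs()[0]         # first antenna pair present in the file
print(uvd.get_data(ap).shape)      # visibility waterfall for that pair
print(uvd.get_flags(ap).shape)     # matching flag waterfall
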
def miriad2pyuvdata(dset, antenna_nums=None, bls=None, polarizations=None,
                    ant_str=None, time_range=None):
    """
    Reads in a Miriad filepath to a UVData object.

    Parameters
    ----------
    dset : str
        Miriad file to convert to a UVData object containing visibilities
        and corresponding metadata.

    antenna_nums : integer list
        The antenna numbers to read into the object.

    bls : list of tuples
        A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) specifying
        baselines to read into the object. Ordering of the numbers within
        the tuple does not matter. A single antenna iterable, e.g. (1,),
        is interpreted as all visibilities with that antenna.

    ant_str : str
        A string describing what kinds of visibility data to read in.
        Can be 'auto', 'cross', 'all'. Cannot provide ant_str if
        antenna_nums and/or bls is not None.

    polarizations : integer or string list
        List of polarization integers or strings to read in,
        e.g. ['xx', 'yy', ...].

    time_range : float list
        Length-2 list containing the min and max range of times
        (Julian Date) to read in, e.g. [2458115.20, 2458115.40].

    Returns
    -------
    uvd : pyuvdata.UVData object
    """
    uvd = pyuvdata.UVData()
    uvd.read_miriad(dset, antenna_nums=antenna_nums, bls=bls,
                    polarizations=polarizations, ant_str=ant_str,
                    time_range=time_range)
    return uvd
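
# Hypothetical usage sketch for miriad2pyuvdata above; the file name,
# baselines, and time range are assumed placeholders.
uvd = miriad2pyuvdata('zen.2458115.31193.xx.HH.uv',
                      bls=[(0, 1), (0, 2)],
                      polarizations=['xx'],
                      time_range=[2458115.20, 2458115.40])
print(uvd.Nblts, uvd.Nfreqs, uvd.get_pols())
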
def read_uvfits(uvfits):
    """
    Reads in a uvfits file and returns a UVData object containing the model
    visibilities and corresponding metadata.

    Parameters
    ----------
    uvfits : string
        Name of the input uvfits file containing the model visibilities and
        the corresponding metadata.
    """
    uvf = pyuvdata.UVData()
    uvf.read_uvfits(uvfits)
    uvf.unphase_to_drift()
    return uvf
def convert_uv(self, phs=None, del_uvfits=False, script='uvfits2ms',
               del_script=True, clobber=False):
    """
    Converts the uvh5 file to a measurement set (MS).

    Parameters
    ----------
    phs : float, optional
        Julian date at which to phase the visibilities. By default the
        visibilities are phased to the middle timestamp of the file.

    del_uvfits : boolean, optional
        If True, deletes the uvfits file that is created during the
        conversion from uvh5 to MS. Default is False.

    script : string, optional
        CASA script created on the fly to execute the CASA task.
        Default is 'uvfits2ms'.

    del_script : boolean, optional
        If True, deletes the on-the-fly created CASA script. Default is True.

    clobber : boolean, optional
        If True, overwrites the existing file with the new one.
        Default is False.
    """
    uvd = pyuvdata.UVData()
    uvd.read_uvh5(self.uvh5_file, run_check=False)
    times = uvd.time_array
    if uvd.phase_type != 'phased':
        phs_time = times[int(len(times) / 2.)] if phs is None else phs
        print('Phasing visibilities to {}'.format(phs_time))
        uvd.phase_to_time(Time(phs_time, format='jd', scale='utc'))

    # converting to uvfits
    uvfits = self.uvh5_file + '.uvfits'
    print('Converting {} to {}'.format(self.uvh5_file, uvfits))
    uvd.write_uvfits(uvfits, spoof_nonessential=True, run_check=False)

    # converting to mset
    if clobber:
        if os.path.exists(self.outfile):
            os.system('rm -rf {}'.format(self.outfile))
    ct.uvfits2ms(uvfits, outfile=self.outfile, script=script,
                 delete=del_script)

    # removing uvfits
    if del_uvfits:
        os.system('rm -rf {}'.format(uvfits))

    # removing log files
    os.system('rm -rf *.log')
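
# A minimal stand-alone sketch of the phasing-plus-uvfits step used in
# convert_uv above, assuming an older pyuvdata API (phase_type,
# spoof_nonessential) and a hypothetical input file name.
import pyuvdata
from astropy.time import Time

uvd = pyuvdata.UVData()
uvd.read_uvh5('zen.2458116.24482.uvh5')  # assumed placeholder path
if uvd.phase_type != 'phased':
    mid_time = uvd.time_array[uvd.time_array.size // 2]
    uvd.phase_to_time(Time(mid_time, format='jd', scale='utc'))
uvd.write_uvfits('zen.2458116.24482.uvfits', spoof_nonessential=True)
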
def load_data(
        data_path='/Users/ruby/EoR/compact_redundant_array_sim_May2020/square_grid_sim__results.uvh5',
        uvw_match_tolerance=1e-12):

    # Load data from pyuvsim simulation:
    data_sim_compact = pyuvdata.UVData()
    data_sim_compact.read_uvh5(data_path)
    # Remove autos
    data_sim_compact.select(ant_str='cross')
    # Use only XX polarizations
    data_sim_compact.select(polarizations=[-5])
    # Convert baselines to have u>0
    data_sim_compact.conjugate_bls(convention='u>0', use_enu=False, uvw_tol=0.01)

    baseline_groups, bl_group_uvw, lengths, conjugates = (
        data_sim_compact.get_redundancies(tol=0.1, use_antpos=False,
                                          include_conjugates=True,
                                          include_autos=True,
                                          conjugate_bls=False))

    # Define constants
    N_red_baselines = np.shape(baseline_groups)[0]

    # Reorder visibilities
    data_sim_vis = np.zeros(N_red_baselines, dtype=np.complex_)
    for red_group in range(N_red_baselines):
        found_group = False
        for red_group_2 in range(N_red_baselines):
            if np.abs(np.sum(data_sim_compact.uvw_array[red_group]
                             - bl_group_uvw[red_group_2])) < uvw_match_tolerance:
                data_sim_vis[red_group] = (
                    data_sim_compact.data_array[red_group_2, 0, 0, 0])
                found_group = True
                break
        if not found_group:
            print('ERROR: Visibility not found.')

    # Make noiseless data
    data_sim_expanded = data_sim_compact.copy()
    data_sim_expanded.inflate_by_redundancy()

    return data_sim_expanded, data_sim_vis, bl_group_uvw
def test_construct_pstokes_multipol(self):
    """Test construct_pstokes on multi-polarization files."""
    uvd = pyuvdata.UVData()
    uvd.read(multipol_dset)
    uvc = pyuvdata.UVCal()
    uvc.read_calfits(multipol_dset_cal)
    uvutils.uvcalibrate(uvd, uvc)
    wgts = [(0.5, 0.5), (0.5, -0.5)]

    for i, ps in enumerate(['pI', 'pQ']):
        uvp = pstokes.construct_pstokes(dset1=uvd, dset2=uvd, pstokes=ps)
        # assert polarization array is correct
        assert uvp.polarization_array == np.array([i + 1])
        # assert data are properly summed
        pstokes_vis = (uvd.get_data(23, 24, 'xx') * wgts[i][0]
                       + uvd.get_data(23, 24, 'yy') * wgts[i][1])
        assert np.isclose(pstokes_vis, uvp.get_data(23, 24, ps)).all()
def test_redundancy_finder_fully_redundant_array():
    """Test that the redundancy finder returns only one baseline group for a
    fully redundant array."""
    uvd = pyuvdata.UVData()
    uvd.read_uvfits(os.path.join(DATA_PATH, 'test_redundant_array.uvfits'))
    uvd.select(times=uvd.time_array[0])

    tol = 1  # meters
    bl_positions = uvd.uvw_array

    baseline_groups, vec_bin_centers, lens, conjugates = uvutils.get_baseline_redundancies(
        uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True)

    # Only 1 set of redundant baselines
    assert len(baseline_groups) == 1
    # Should return the input baselines (compare sorted copies; calling
    # .sort() in place returns None and would make the check vacuous)
    assert np.array_equal(np.sort(baseline_groups[0]),
                          np.unique(uvd.baseline_array))
def mkuvfits(fin):
    hh = c.hf(mkuvfits, fin)
    mm = repo.get(hh)
    if mm:
        c.trc("[Cached]", "mkuvfits", fin)
        return mm
    else:
        c.trc("[Eval] ", "mkuvfits", fin, " -> ", hh)
        UV = pyuvdata.UVData()
        UV.read_miriad(fin, 'miriad')
        UV.phase_to_time(UV.time_array[0])
        tempf = repo.mktemp()
        os.remove(tempf)
        UV.write_uvfits(tempf, 'uvfits')
        if not os.path.exists(tempf):
            raise RuntimeError("No output produced by mkuvfits !")
        return repo.put(tempf, hh)
def get_VI_data(vis_data_path):
    uvd = pyuvdata.UVData()
    uvd.read_uvh5(vis_data_path)
    # one of these days...
    xx_integer = pyuvdata.utils.polstr2num('xx')
    yy_integer = pyuvdata.utils.polstr2num('yy')
    xx_ind = np.argwhere(uvd.polarization_array == xx_integer)[0][0]
    yy_ind = np.argwhere(uvd.polarization_array == yy_integer)[0][0]
    VI_data = uvd.data_array[:, :, :, xx_ind] + uvd.data_array[:, :, :, yy_ind]
    uvd.select(polarizations=(-5))
    uvd.polarization_array[0] = 1
    uvd.data_array = VI_data.reshape(VI_data.shape + (1,))
    return uvd
def getUVData(directory, datdictionary):
    """
    Searches the directory for a file that matches the file pattern
    (recid and ant from datdictionary) and returns a pyuvdata object.
    """
    logger = logger_defaults.getModuleLogger(__name__)

    rec = datdictionary['recid']
    ant = datdictionary['ant']

    fnamepattern = os.path.expanduser(
        os.path.join(directory, '*_' + str(rec) + '_*_' + ant + '.h5'))
    fnamelist = glob.glob(fnamepattern)
    if len(fnamelist) != 1:
        logger.error('there is not exactly 1 file matching the pattern. '
                     'Got {}'.format(fnamelist))
        raise RuntimeError('not exactly 1 filename matching the pattern')

    fname = fnamelist[0]
    UV = pyuvdata.UVData()
    UV.read_uvh5(fname)
    return UV
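
# Hypothetical usage sketch for getUVData above; the directory and the
# recid/ant values are assumed placeholders, not values from this codebase.
datdict = {'recid': 42, 'ant': '1a'}
UV = getUVData('~/snap_obs_data', datdict)
print(UV.Nfreqs, UV.Ntimes, UV.extra_keywords.get('recid'))
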
def makemsfile(fin, **kwargs):
    print(fin)
    hh = c.hf(makemsfile, fin)
    mm = repo.get(hh)
    print(mm)
    if mm:
        c.trc("[Cached]", "makemsfile", fin)
    else:
        c.trc("[Eval] ", "makemsfile", fin, " -> ", hh)
        UV = pyuvdata.UVData()
        UV.read_miriad(fin)
        UV.phase_to_time(Time(UV.time_array[0], format='jd', scale='utc'))
        tempf = repo.mktemp()
        os.remove(tempf)
        UV.write_uvfits(tempf, spoof_nonessential=True)
        if not os.path.exists(tempf):
            raise RuntimeError("No output produced by mkuvfits!")
        foms = c.importuvfits(tempf)
        os.remove(tempf)
        # flms = c.flagdata(foms, autocorr=True)
        mm = repo.put(foms, hh)
    return mm
def load_model(
        data_sim_expanded, bl_group_uvw,
        model_path='/Users/ruby/EoR/compact_redundant_array_sim_May2020/square_grid_100mjy_sim_results.uvh5',
        uvw_match_tolerance=1e-12):

    # Define constant
    N_red_baselines = np.shape(bl_group_uvw)[0]

    # Load data with missing sources from pyuvsim simulation:
    model_sim = pyuvdata.UVData()
    model_sim.read_uvh5(model_path)
    # Remove autos
    model_sim.select(ant_str='cross')
    # Use only XX polarizations
    model_sim.select(polarizations=[-5])
    # Convert baselines to have u>0
    model_sim.conjugate_bls(convention='u>0', use_enu=False, uvw_tol=0.01)

    model_sim_visibilities = np.zeros(N_red_baselines, dtype=np.complex_)
    for red_group in range(N_red_baselines):
        found_group = False
        for red_group_2 in range(N_red_baselines):
            if np.abs(np.sum(model_sim.uvw_array[red_group]
                             - bl_group_uvw[red_group_2])) < uvw_match_tolerance:
                model_sim_visibilities[red_group] = (
                    model_sim.data_array[red_group_2, 0, 0, 0])
                found_group = True
                break
        if not found_group:
            print('ERROR: Visibility not found.')

    return model_sim_visibilities
        qIn = dsIn.q_hat(key1, key2, use_cov=False, cov_flagging=False)
        MCn, WCn = dsCn.get_MW(FCn, mode='I')
        MIn, WIn = dsIn.get_MW(FIn, mode='I')
        pCn = dsCv.p_hat(MCn, qCn, scalar=scalar)
        pIn = dsIv.p_hat(MIn, qIn, scalar=scalar)
        pCns.append(pCn)
        pIns.append(pIn)
    return np.array(pIvs), np.array(pIns)


new_files_odd = []
new_files_even = []
lsts_odd = []
lsts_ODD = []
# PAPER LST-binned files don't have equal time samples; find files with 21 LST samples
for i in dfiles_odd:
    _d = uv.UVData()
    _d.read_miriad(i)
    if len(np.unique(_d.time_array)) == 21:
        new_files_odd.append(i)
        lsts_ODD.append(np.unique(_d.lst_array))
        lsts_odd.append(np.mean(np.unique(_d.lst_array)))
lsts_ODD = np.sort(np.array(lsts_ODD).reshape(-1))

lsts_even = []
lsts_EVEN = []
for i in dfiles_even:
    _d = uv.UVData()
    _d.read_miriad(i)
    if len(np.unique(_d.time_array)) == 21:
        new_files_even.append(i)
import pyuvdata
import glob
import numpy as np
from math import pi

pathlist = 3 * ['/data6/HERA/data/2458042/zen.2458042.']
obs = ['12552', '48343', '53563']
for k in range(3):
    pathlist[k] += obs[k] + '.xx.HH.uv'

UV0 = pyuvdata.UVData()
UV48 = pyuvdata.UVData()
UV55 = pyuvdata.UVData()
UV0.read_miriad(pathlist[0])
UV48.read_miriad(pathlist[1])
UV55.read_miriad(pathlist[2])

begin = UV0.lst_array[0]
ev_beg = UV48.lst_array[0] + 2 * pi
end = UV55.lst_array[0] + 2 * pi
lst = np.array([begin, ev_beg, end]) * 24 / (2 * pi)
lst_len = np.diff(lst)
print('The LSTs of the events were: ' + str(lst))
print('The differences between the events were ' + str(lst_len))
import sys
import pyuvdata, numpy
from pylab import *

args = sys.argv[1:]
print(args)
lst = np.array([])
for lstfile in args:
    uv = pyuvdata.UVData()
    uv.read_miriad(lstfile, run_check=False, run_check_acceptability=False)
    waterfall = uv.nsample_array[np.where(uv.baseline_array == 264257)].squeeze()
    try:
        data = np.append(data, waterfall, axis=0)
    except NameError:
        data = waterfall
    lstslice = uv.lst_array[np.where(uv.baseline_array == 264257)].squeeze()
    lst = np.append(lst, lstslice)

indices = numpy.argsort(lst)
lst = lst[indices]
imshow(numpy.abs(data[indices]),
       interpolation='nearest', aspect='auto',
       extent=[uv.freq_array[0, 0], uv.freq_array[0, -1], lst[-1], lst[0]])
xlabel('Frequency (Hz)')
ylabel('Local Sidereal Time (Radians)')
cb = colorbar()
cb.set_label('Number of Samples')
def create_snap_uvdata(snapdict, azoffset, eloffset, recid, setid=None):
    logger_defaults.getModuleLogger(__name__)

    obj = pyuvdata.UVData()

    ant = snapdict['ant']
    ashape = numpy.shape(snapdict['auto0'])

    # obj.latitude = ata_constants.ATA_LAT
    # obj.longitude = ata_constants.ATA_LON
    # obj.altitude = ata_constants.ATA_ELEV
    try:
        obj.telescope_location = pyuvdata.uvutils.XYZ_from_LatLonAlt(
            ata_constants.ATA_LAT / 180.0 * numpy.pi,
            ata_constants.ATA_LON / 180.0 * numpy.pi,
            ata_constants.ATA_ELEV)
    except AttributeError:
        obj.telescope_location = pyuvdata.utils.XYZ_from_LatLonAlt(
            ata_constants.ATA_LAT / 180.0 * numpy.pi,
            ata_constants.ATA_LON / 180.0 * numpy.pi,
            ata_constants.ATA_ELEV)

    obj.telescope_name = ata_constants.ATA_NAME
    obj.instrument = ata_constants.ATA_NAME + snapdict['host']
    if azoffset == 0 and eloffset == 0:
        obj.object_name = snapdict['source']
    else:
        obj.object_name = '{0:s}_off_{1:03.1f}_{2:03.1f}'.format(
            snapdict['source'], azoffset, eloffset)
    obj.history = 'Snap Waterfall measurement'
    obj.phase_type = 'phased'
    obj.Nants_data = 1
    obj.Nants_telescope = len(ata_constants.ant_names)

    aind = ata_constants.ant_names.index(ant)
    obj.ant_1_array = numpy.array([aind] * ashape[0])
    obj.ant_2_array = numpy.array([aind] * ashape[0])
    obj.baseline_array = pow(2, 16) + (numpy.array([aind] * ashape[0]) + 1) * 2049
    obj.antenna_names = ata_constants.ant_names
    obj.antenna_numbers = list(range(len(ata_constants.ant_names)))
    obj.Nbls = 1
    obj.Nblts = ashape[0]  # should be the same as snapdict['ncaptures']
    obj.Nfreqs = ashape[1]
    obj.Npols = 2  # that may be wrong
    obj.Ntimes = ashape[0]
    obj.Nspws = 1
    obj.uvw_array = numpy.zeros((ashape[0], 3), dtype=float)

    tt = Time(snapdict['auto0_timestamp'], format='unix',
              location=(ata_constants.ATA_LON, ata_constants.ATA_LAT,
                        ata_constants.ATA_ELEV))
    # obj.time_array = tt.to_value('mjd', 'long')
    obj.time_array = tt.jd
    # utils.get_lst_for_time?
    obj.lst_array = numpy.array(tt.sidereal_time('apparent')) / 12 * numpy.pi
    # obj.integration_time = [ashape[1]/(snapdict['srate']*1e6)] * ashape[0]
    obj.integration_time = [snapdict['tint']] * ashape[0]

    tmparray = numpy.zeros((1, len(snapdict['frange'])))
    tmparray[0][:] = snapdict['frange'] * 1e6
    obj.freq_array = tmparray
    # obj.channel_width = snapdict['srate']/2
    obj.channel_width = (snapdict['frange'][1] - snapdict['frange'][0]) * 1e6
    # i am not sure about that
    obj.spw_array = [1]
    # -5 is XX, -6 is YY
    obj.polarization_array = [-5, -6]
    obj.vis_units = 'uncalib'
    obj.nsample_array = numpy.ones((ashape[0], 1, ashape[1], 2), dtype=float)
    obj.flag_array = numpy.zeros((ashape[0], 1, ashape[1], 2), dtype=bool)
    obj.data_array = numpy.zeros((ashape[0], 1, ashape[1], 2),
                                 dtype=numpy.complex64)
    xx = numpy.array(snapdict['auto0'], dtype=numpy.complex64)
    obj.data_array[:, 0, :, 0] = xx
    yy = numpy.array(snapdict['auto1'], dtype=numpy.complex64)
    obj.data_array[:, 0, :, 1] = yy

    # now we have all required parameters, let's fill the extra keywords
    apos_dict = ata_control.get_ant_pos(ata_constants.ant_names)
    obj.antenna_positions = numpy.zeros((len(ata_constants.ant_names), 3),
                                        dtype=float)
    for ii in range(len(ata_constants.ant_names)):
        cant = ata_constants.ant_names[ii]
        obj.antenna_positions[ii][0] = apos_dict[cant][1]
        obj.antenna_positions[ii][1] = apos_dict[cant][0]
        obj.antenna_positions[ii][2] = apos_dict[cant][2]

    # optional arguments
    obj.timesys = datetime.datetime.utcfromtimestamp(
        snapdict['auto0_timestamp'][0]).strftime('%Y-%m-%d %H:%M:%S')
    if 'ra' in snapdict:
        obj.phase_center_ra = snapdict['ra'] / 12 * numpy.pi
    else:
        obj.phase_center_ra = 0
    if 'dec' in snapdict:
        obj.phase_center_dec = snapdict['dec'] / 180 * numpy.pi
    else:
        obj.phase_center_dec = 0
    # J2000.0
    obj.phase_center_epoch = 2000.0

    # now we are creating an extra keywords dictionary
    ek = {}
    ek['ata_version'] = '0.3'
    ek['fft_shift'] = snapdict['fft_shift']
    ek['adc0_bitsnaps'] = snapdict['adc0_bitsnaps']
    ek['adc1_bitsnaps'] = snapdict['adc1_bitsnaps']
    ek['adc0_mean'] = snapdict['adc0_stats']['mean']
    ek['adc0_dev'] = snapdict['adc0_stats']['dev']
    ek['adc1_mean'] = snapdict['adc1_stats']['mean']
    ek['adc1_dev'] = snapdict['adc1_stats']['dev']
    ek['lfft_of0'] = len(snapdict['fft_of0'])
    for ii in range(len(snapdict['fft_of0'])):
        ek['fft_of0_' + str(ii)] = snapdict['fft_of0'][ii]
    ek['lfft_of1'] = len(snapdict['fft_of1'])
    for ii in range(len(snapdict['fft_of1'])):
        ek['fft_of1_' + str(ii)] = snapdict['fft_of1'][ii]
    ek['lauto0_of_count'] = len(snapdict['auto0_of_count'])
    for ii in range(len(snapdict['auto0_of_count'])):
        ek['auto0_of_count_' + str(ii)] = snapdict['auto0_of_count'][ii]
    ek['lauto1_of_count'] = len(snapdict['auto1_of_count'])
    for ii in range(len(snapdict['auto1_of_count'])):
        ek['auto1_of_count_' + str(ii)] = snapdict['auto1_of_count'][ii]
    ek['srate'] = snapdict['srate']
    ek['fpga_clk'] = snapdict['fpga_clk']
    ek['rfc'] = snapdict['rfc']
    ek['ifc'] = snapdict['ifc']
    ek['fpgfile'] = snapdict['fpgfile']
    if not setid:
        ek['setid'] = -1
    else:
        ek['setid'] = setid
    ek['recid'] = recid
    ek['ant'] = ant
    if 'az' in snapdict:
        ek['ant_az'] = snapdict['az']
    else:
        ek['ant_az'] = 0.0
    if 'el' in snapdict:
        ek['ant_el'] = snapdict['el']
    else:
        ek['ant_el'] = 0.0

    obj.extra_keywords = ek
    return obj
import pyuvdata as pyuv
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt

UV = pyuv.UVData()
UV.read_uvfits('/Users/mike_e_dubs/python_stuff/uvfits/1061313008.uvfits')
baseline_time_indices = []
for m in range(UV.Nblts):
    if UV.ant_1_array[m] == UV.ant_2_array[m]:
        baseline_time_indices.append(m)
UV.select(blt_inds=baseline_time_indices)

fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
Hxy = np.ma.masked_equal(np.absolute(UV.data_array[:, 0, :, 2]), 0)
Hyx = np.ma.masked_equal(np.absolute(UV.data_array[:, 0, :, 3]), 0)
cmap = cm.cool
cmap.set_bad(color='white')
caxXY = ax1.imshow(Hxy, cmap=cmap, vmin=np.amin(Hxy), vmax=np.amax(Hxy))
caxYX = ax2.imshow(Hyx, cmap=cmap, vmin=np.amin(Hyx), vmax=np.amax(Hyx))
cbarXY = fig1.colorbar(caxXY, ax=ax1)
cbarYX = fig2.colorbar(caxYX, ax=ax2)
nants = max(ant_nums) + 1
antpos = np.zeros([nants, 3])
for i, ant in enumerate(ant_nums):
    antpos[ant, :] = rotecef_positions[i, :] / c_ns

# make an aa object
freqs = np.array([0.15])
beam = aipy.phs.Beam(freqs)
ants = [aipy.phs.Antenna(a[0], a[1], a[2], beam) for a in antpos]
aa = aipy.phs.AntennaArray(ants=ants, location=location)

# loop over miriad files
# XXX: DEFINE LIST OF MIRIAD FILES
for fn in list_of_miriad_files:
    uvd = pyuvdata.UVData()
    uvd.read_miriad(fn)

    # set the telescope location
    uvd.telescope_location_lat_lon_alt = (cofa_loc.lat * np.pi / 180.,
                                          cofa_loc.lon * np.pi / 180.,
                                          cofa_loc.elevation)

    # loop over aa object
    idx = 0
    antpos = np.zeros((len(aa), 3))
    ants_telescope = []
    for iant, ant in enumerate(aa):
        # test to see if antenna is "far" from center of the Earth
        if np.linalg.norm(ant.pos) > 1e6:
            # convert from ns -> m
def test_redundancy_finder():
    """
    Check that get_baseline_redundancies and get_antenna_redundancies return
    consistent redundant groups for a test file with the HERA19 layout.
    """
    uvd = pyuvdata.UVData()
    uvd.read_uvfits(
        os.path.join(DATA_PATH, 'fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits'))
    uvd.select(times=uvd.time_array[0])
    uvd.unphase_to_drift()  # uvw_array is now equivalent to baseline positions
    uvtest.checkWarnings(uvd.conjugate_bls,
                         func_kwargs={'convention': 'u>0', 'use_enu': True},
                         message=['The default for the `center`'],
                         nwarnings=1,
                         category=DeprecationWarning)

    tol = 0.05  # meters

    bl_positions = uvd.uvw_array

    pytest.raises(ValueError, uvutils.get_baseline_redundancies,
                  uvd.baseline_array, bl_positions[0:2, 0:1])
    baseline_groups, vec_bin_centers, lens = uvutils.get_baseline_redundancies(
        uvd.baseline_array, bl_positions, tol=tol)

    for gi, gp in enumerate(baseline_groups):
        for bl in gp:
            bl_ind = np.where(uvd.baseline_array == bl)
            bl_vec = bl_positions[bl_ind]
            assert np.allclose(np.sqrt(np.dot(bl_vec, vec_bin_centers[gi])),
                               lens[gi], atol=tol)

    # Shift the baselines around in a circle. Check that the same baselines are
    # recovered to the corresponding tolerance increase.
    # This moves one baseline at a time by a fixed displacement and checks that
    # the redundant groups are the same.

    hightol = 0.25  # meters. Less than the smallest baseline in the file.
    Nbls = uvd.Nbls
    Nshifts = 5
    shift_angs = np.linspace(0, 2 * np.pi, Nshifts)
    base_shifts = np.stack(((hightol - tol) * np.cos(shift_angs),
                            (hightol - tol) * np.sin(shift_angs),
                            np.zeros(Nshifts))).T
    for sh in base_shifts:
        for bi in range(Nbls):
            # Shift one baseline at a time.
            bl_positions_new = uvd.uvw_array
            bl_positions_new[bi] += sh
            baseline_groups_new, vec_bin_centers, lens = uvutils.get_baseline_redundancies(
                uvd.baseline_array, bl_positions_new, tol=hightol)
            for gi, gp in enumerate(baseline_groups_new):
                for bl in gp:
                    bl_ind = np.where(uvd.baseline_array == bl)
                    bl_vec = bl_positions[bl_ind]
                    assert np.allclose(
                        np.sqrt(np.abs(np.dot(bl_vec, vec_bin_centers[gi]))),
                        lens[gi], atol=hightol)

            # Compare baseline groups:
            a = [tuple(el) for el in baseline_groups]
            b = [tuple(el) for el in baseline_groups_new]
            assert set(a) == set(b)

    tol = 0.05

    antpos, antnums = uvtest.checkWarnings(
        uvd.get_ENU_antpos,
        message=['The default for the `center`'],
        category=DeprecationWarning,
        nwarnings=1)

    baseline_groups_ants, vec_bin_centers, lens = uvutils.get_antenna_redundancies(
        antnums, antpos, tol=tol, include_autos=False)

    # Under these conditions, should see 19 redundant groups in the file.
    assert len(baseline_groups_ants) == 19

    # Check with conjugated baseline redundancies returned
    u16_0 = bl_positions[16, 0]
    # Ensure at least one baseline has u == 0 and v != 0 (for coverage of this case)
    bl_positions[16, 0] = 0
    baseline_groups, vec_bin_centers, lens, conjugates = uvutils.get_baseline_redundancies(
        uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True)

    # restore baseline (16, 0) and repeat to get correct groups
    bl_positions[16, 0] = u16_0
    baseline_groups, vec_bin_centers, lens, conjugates = uvutils.get_baseline_redundancies(
        uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True)

    # Should get the same groups as with the antenna method:
    baseline_groups_flipped = []
    for bgp in baseline_groups:
        bgp_new = []
        for bl in bgp:
            ai, aj = uvutils.baseline_to_antnums(bl, uvd.Nants_telescope)
            if bl in conjugates:
                bgp_new.append(uvutils.antnums_to_baseline(aj, ai, uvd.Nants_telescope))
            else:
                bgp_new.append(uvutils.antnums_to_baseline(ai, aj, uvd.Nants_telescope))
        bgp_new.sort()
        baseline_groups_flipped.append(bgp_new)
    baseline_groups = [sorted(bgp) for bgp in baseline_groups]
    assert np.all(sorted(baseline_groups_ants) == sorted(baseline_groups_flipped))
    for gi, gp in enumerate(baseline_groups):
        for bl in gp:
            bl_ind = np.where(uvd.baseline_array == bl)
            bl_vec = bl_positions[bl_ind]
            if bl in conjugates:
                bl_vec *= (-1)
            assert np.isclose(np.sqrt(np.dot(bl_vec, vec_bin_centers[gi])),
                              lens[gi], atol=tol)
import numpy as np
import matplotlib.pyplot as plt
import pyuvdata
import plot_lib
from matplotlib import cm
from matplotlib.ticker import AutoMinorLocator

FHD_dir = '/Users/mike_e_dubs/MWA/FHD/fhd_mjw_Aug23_Jan2018/'
obs_str = '1061313128_f181.2_f187.5_t30_t36'
suffixes = ['vis_XX.sav', 'vis_YY.sav', 'vis_model_XX.sav', 'vis_model_YY.sav',
            'flags.sav']
fhd_files = ['%s/vis_data/%s_%s' % (FHD_dir, obs_str, suffix)
             for suffix in suffixes]
fhd_files.append('%s/metadata/%s_params.sav' % (FHD_dir, obs_str))
UV1 = pyuvdata.UVData()
UV2 = pyuvdata.UVData()
UV1.read_fhd(fhd_files, use_model=True)
UV2.read_uvfits(
    '/Users/mike_e_dubs/MWA/Data/smaller_uvfits/1061313128_f181.2_f187.5_t30_t36.uvfits')
fhd_flags = np.sum(np.reshape(UV1.flag_array,
                              [UV2.Ntimes, UV2.Nbls, 1, UV2.Nfreqs, 2]),
                   axis=1)
dirty_flags = np.sum(np.reshape(UV2.flag_array[:, :, :, :2],
                                [UV2.Ntimes, UV2.Nbls, 1, UV2.Nfreqs, 2]),
                     axis=1)
fig1, ax1 = plt.subplots(figsize=(14, 8), nrows=2)
fig1.suptitle('Model Flags')
import pyuvdata

UV = pyuvdata.UVData()
UV.read_miriad(
    '/Users/mike_e_dubs/python_stuff/miriad/temp_HERA_data/zen.2457555.40356.xx.HH.uvc')
UV.write_uvfits(
    '/Users/mike_e_dubs/python_stuff/uvfits/zen.2457555.40356.xx.HH.uvfits',
    force_phase=True, spoof_nonessential=True)
def test_redundancy_finder():
    """
    Check that get_baseline_redundancies and get_antenna_redundancies return
    consistent redundant groups for a test file with the HERA19 layout.
    """
    uvd = pyuvdata.UVData()
    uvd.read_uvfits(
        os.path.join(DATA_PATH,
                     'hera19_8hrs_uncomp_10MHz_000_05.003111-05.033750.uvfits'))
    uvd.select(times=uvd.time_array[0])
    uvd.unphase_to_drift(use_ant_pos=True)  # uvw_array is now equivalent to baseline positions

    tol = 0.05  # meters

    bl_positions = uvd.uvw_array

    nt.assert_raises(ValueError, uvutils.get_baseline_redundancies,
                     uvd.baseline_array, bl_positions[0:2, 0:1])
    baseline_groups, vec_bin_centers, lens = uvutils.get_baseline_redundancies(
        uvd.baseline_array, bl_positions, tol=tol)

    for gi, gp in enumerate(baseline_groups):
        for bl in gp:
            bl_ind = np.where(uvd.baseline_array == bl)
            bl_vec = bl_positions[bl_ind]
            nt.assert_true(np.allclose(
                np.sqrt(np.dot(bl_vec, vec_bin_centers[gi])),
                lens[gi], atol=tol))

    # Now jostle the baselines around by up to 0.25 m and see if we can
    # recover the same redundancies to that tolerance.
    tol = 0.25  # meters. Less than the smallest baseline in the file.
    Nbls = uvd.Nbls
    shift_dists = np.random.uniform(low=0.0, high=tol / 2., size=Nbls)
    shift_angs = np.random.uniform(low=0.0, high=2 * np.pi, size=Nbls)
    shift_vecs = np.stack((shift_dists * np.cos(shift_angs),
                           shift_dists * np.sin(shift_angs),
                           np.zeros(Nbls))).T

    bl_positions_new = uvd.uvw_array + shift_vecs

    baseline_groups_new, vec_bin_centers, lens = uvutils.get_baseline_redundancies(
        uvd.baseline_array, bl_positions_new, tol=tol)

    for gi, gp in enumerate(baseline_groups_new):
        for bl in gp:
            bl_ind = np.where(uvd.baseline_array == bl)
            bl_vec = bl_positions[bl_ind]
            nt.assert_true(np.allclose(
                np.sqrt(np.abs(np.dot(bl_vec, vec_bin_centers[gi]))),
                lens[gi], atol=tol))

    # Compare baseline groups:
    for c, blg in enumerate(baseline_groups):
        bl = blg[0]
        ind = np.sum(np.where([bl in gp for gp in baseline_groups_new]))
        nt.assert_equal(baseline_groups_new[ind], blg)

    tol = 0.05

    antpos, antnums = uvd.get_ENU_antpos()

    baseline_groups_ants, vec_bin_centers, lens = uvutils.get_antenna_redundancies(
        antnums, antpos, tol=tol, include_autos=True)

    # Under these conditions, should see 31 redundant groups in the file.
    nt.assert_equal(len(baseline_groups_ants), 31)

    # Check with conjugated baseline redundancies returned
    u16_0 = bl_positions[16, 0]
    # Ensure at least one baseline has u == 0 and v != 0 (for coverage of this case)
    bl_positions[16, 0] = 0
    baseline_groups, vec_bin_centers, lens, conjugates = uvutils.get_baseline_redundancies(
        uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True)

    # restore baseline (16, 0) and repeat to get correct groups
    bl_positions[16, 0] = u16_0
    baseline_groups, vec_bin_centers, lens, conjugates = uvutils.get_baseline_redundancies(
        uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True)

    # Should get the same groups as with the antenna method:
    baseline_groups_flipped = []
    for bgp in baseline_groups:
        bgp_new = []
        for bl in bgp:
            ai, aj = uvutils.baseline_to_antnums(bl, uvd.Nants_telescope)
            if bl in conjugates:
                bgp_new.append(uvutils.antnums_to_baseline(aj, ai, uvd.Nants_telescope))
            else:
                bgp_new.append(uvutils.antnums_to_baseline(ai, aj, uvd.Nants_telescope))
        bgp_new.sort()
        baseline_groups_flipped.append(bgp_new)
    baseline_groups = [sorted(bgp) for bgp in baseline_groups]
    nt.assert_true(np.all(
        sorted(baseline_groups_ants) == sorted(baseline_groups_flipped)))
    for gi, gp in enumerate(baseline_groups):
        for bl in gp:
            bl_ind = np.where(uvd.baseline_array == bl)
            bl_vec = bl_positions[bl_ind]
            if bl in conjugates:
                bl_vec *= (-1)
            nt.assert_true(np.isclose(
                np.sqrt(np.dot(bl_vec, vec_bin_centers[gi])),
                lens[gi], atol=tol))