def build_redundant(mode='sep', aa=None, cal=None, sep=-1, restore="nofile",
                    length=None, min_count=2):
    if cal is None and restore is None:
        raise NameError("Either cal or restore, or both, must be defined.")
    have_file = restore is not None and os.path.isfile(restore)
    if have_file:
        cal = None
        fil = np.load(restore)
        reds = fil['reds']
        if 'lens' in fil.keys():
            lens = fil['lens']
    if ((cal is not None) or (aa is not None)) and not have_file:
        if aa is None:
            aa = a.cal.get_aa(cal, 1024 / 100e6, .1, 1024)
        info = omni.aa_to_info(aa)
        reds = info.get_reds()
        reds = [gp for gp in reds if len(gp) >= int(min_count)]
        lens = []
        for gp in reds:
            i, j = gp[0]
            bl = aa.get_baseline(i, j)  # baseline vector in light-ns
            lens.append(np.sqrt(sum(c**2 for c in bl)) * a.const.len_ns / 100.)  # convert to meters
        reds = np.array(reds)
        lens = np.array(lens)
    if mode == 'lengths':
        lens = np.unique(np.round(lens, 3))
        return ','.join(map(str, lens))
    if length is not None:
        # Select only groups of baselines with a given length.
        tolerance = 0.50 if float(length) > 0 else min(lens) + 0.50  # 50 cm good enough?
        reds = reds[np.where(np.abs(lens - float(length)) < tolerance)]
    if restore is not None and restore != 'nofile':
        np.savez(restore, reds=reds, lens=lens)
    if mode == 'flatten':
        return ",".join([str(it[0]) + "_" + str(it[1]) for sublist in reds for it in sublist])
    if mode == 'count':
        return len(reds)
    if mode == 'sep':
        if sep == -1:
            raise ValueError("Choose a valid redundant group index.")
        return [str(it[0]) + "_" + str(it[1]) for it in reds[int(sep)]]
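# Usage sketch for build_redundant (illustrative only; the calfile name
# 'hsa7458_v000_HH' below is just an example -- substitute whatever calfile
# the array actually uses):
#
#   build_redundant(mode='lengths', cal='hsa7458_v000_HH')          # unique baseline lengths [m], comma-separated
#   build_redundant(mode='count',   cal='hsa7458_v000_HH')          # number of redundant groups
#   build_redundant(mode='sep',     cal='hsa7458_v000_HH', sep=0)   # ['i_j', ...] baselines in group 0
#   build_redundant(mode='flatten', cal='hsa7458_v000_HH',
#                   restore='reds.npz')                             # flattened 'i_j' list; also caches reds/lens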
# hera info assuming a hex of 19 and 128 antennas
aa = a.cal.get_aa(opts.cal, n.array([.150]))
ex_ants = []
ubls = []
for ant in opts.ex_ants.split(','):
    try:
        ex_ants.append(int(ant))
    except ValueError:
        pass
for bl in opts.ubls.split(','):
    try:
        i, j = bl.split('_')
        ubls.append((int(i), int(j)))
    except ValueError:
        pass
print 'Excluding Antennas:', ex_ants
if len(ubls) > 0:
    print 'Using Unique Baselines:', ubls
info = omni.aa_to_info(aa, fcal=True, ubls=ubls, ex_ants=ex_ants)
reds = flatten_reds(info.get_reds())
print 'Number of redundant baselines:', len(reds)

# Read in data here.
ant_string = ','.join(map(str, info.subsetant))
bl_string = ','.join(['_'.join(map(str, k)) for k in reds])
times, data, flags = arp.get_dict_of_uv_data(args, bl_string, opts.pol, verbose=True)
datapack, wgtpack = {}, {}
for (i, j) in data.keys():
    datapack[(i, j)] = data[(i, j)][opts.pol]
    wgtpack[(i, j)] = np.logical_not(flags[(i, j)][opts.pol])
nfreq = datapack[datapack.keys()[0]].shape[1]  # XXX less hacky than previous hardcode, but always safe?
fqs = n.linspace(.1, .2, nfreq)
dlys = n.fft.fftshift(n.fft.fftfreq(fqs.size, np.diff(fqs)[0]))
s2 = {}
for k, i in s.iteritems():
    s2[str(k)] = omni.get_phase(f, i)
s2['pol'] = pol
n.savez('fcgains.%s.npz' % pol, **s2)

def normalize_data(datadict):
    d = {}
    for key in datadict.keys():
        d[key] = datadict[key] / n.where(n.abs(datadict[key]) == 0., 1., n.abs(datadict[key]))
    return d

# hera info assuming a hex of 19 and 128 antennas
aa = a.cal.get_aa(opts.cal, n.array([.150]))
info = omni.aa_to_info(aa, fcal=True, ex_ants=[81])
infotest = omni.aa_to_info(aa, fcal=True, ubls=[(80, 104), (9, 22), (80, 96)], ex_ants=[81])
#info = hx.hera_to_info(3, 128, connections=connection_file, ex_ants=[81])
#infotest = hx.hera_to_info(3, 128, connections=connection_file, ex_ants=[81])
#infotest = hx.hera_to_info(3, 128, connections=connection_file, ubls=[(80,104),(9,22),(80,96)], ex_ants=[81])
reds = flatten_reds(info.get_reds())
redstest = infotest.get_reds()  # for plotting
print len(reds)

# Read in data here.
ant_string = ','.join(map(str, info.subsetant))
bl_string = ','.join(['_'.join(map(str, k)) for k in reds])
times, data, flags = arp.get_dict_of_uv_data(args, bl_string, opts.pol, verbose=True)
dataxx = {}
for (i, j) in data.keys():
    dataxx[(i, j)] = data[(i, j)]['xx']
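# Quick sanity check of normalize_data above (illustrative): amplitudes are
# normalized to 1, and channels with exactly zero amplitude are left at 0
# rather than triggering a divide-by-zero:
#
#   >>> d = normalize_data({'bl': n.array([0. + 0.j, 3. + 4.j])})
#   >>> n.abs(d['bl'])
#   array([ 0.,  1.])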
# hera info assuming a hex of 19 and 128 antennas
aa = a.cal.get_aa(opts.cal, n.array([.150]))
ex_ants = []
ubls = []
for ant in opts.ex_ants.split(','):
    try:
        ex_ants.append(int(ant))
    except ValueError:
        pass
for bl in opts.ubls.split(','):
    try:
        i, j = bl.split('_')
        ubls.append((int(i), int(j)))
    except ValueError:
        pass
print 'Excluding Antennas:', ex_ants
if len(ubls) > 0:
    print 'Using Unique Baselines:', ubls
info = omni.aa_to_info(aa, fcal=True, ubls=ubls, ex_ants=ex_ants)
reds = flatten_reds(info.get_reds())
print 'Number of redundant baselines:', len(reds)

# Read in data here.
ant_string = ','.join(map(str, info.subsetant))
bl_string = ','.join(['_'.join(map(str, k)) for k in reds])
times, data, flags = arp.get_dict_of_uv_data(args, bl_string, opts.pol, verbose=True)
datapack = {}  # not necessarily xx data inside
for (i, j) in data.keys():
    datapack[(i, j)] = data[(i, j)][opts.pol]
nfreq = datapack[datapack.keys()[0]].shape[1]  # XXX less hacky than previous hardcode, but always safe?
fqs = n.linspace(.1, .2, nfreq)
dlys = n.fft.fftshift(n.fft.fftfreq(fqs.size, np.diff(fqs)[0]))
# gets phase solutions per frequency.
        dts.append(dt); offs.append(off)
    return np.array(dts), np.array(offs)

o = optparse.OptionParser()
o.add_option('--visualize', action='store_true', default=False)
o.add_option('--plot', action='store_true', default=False)
opts, args = o.parse_args(sys.argv[1:])
filename = args  # uv file with first cal solutions applied to it.
aa = a.cal.get_aa('hsa7458_v000_HH_delaytest', np.array([.150]))
fqs = np.linspace(.1, .2, 1024)
valid = np.ones_like(fqs)
# only fit phase to within this frequency range.
valid = np.logical_and(valid, np.logical_and(fqs > .11, fqs < .19))
info = omni.aa_to_info(aa)
reds = flatten_reds(info.get_reds())
antstr = zsa.list2str(reds)
integration = 0  # testing for a single integration
pol = 'xx'
times, data, flags = miriad.read_files(filename, antstr, pol)
#wh = aa.antpos_ideal[:,2] != -1
#ants = np.arange(len(aa.ants))
#ants = ants[wh]
bs = []
xs = []
ys = []
ds = []
os = []
for bl in data.keys():
cmd = sys.argv
s2['cmd'] = ' '.join(cmd)
n.savez('%s.npz' % name, **s2)

def normalize_data(datadict):
    d = {}
    for key in datadict.keys():
        d[key] = datadict[key] / n.where(n.abs(datadict[key]) == 0., 1., n.abs(datadict[key]))
    return d

# hera info assuming a hex of 19 and 128 antennas
aa = a.cal.get_aa(opts.cal, n.array([.150]))
bad_ants = [int(ant) for ant in opts.ex_ants.split(',')]  # ex_ants given as a comma-separated string
info = omni.aa_to_info(aa, fcal=True, ex_ants=bad_ants)
reds = flatten_reds(info.get_reds())
print len(reds)

# Read in data here.
ant_string = ','.join(map(str, info.subsetant))
bl_string = ','.join(['_'.join(map(str, k)) for k in reds])
times, data, flags = arp.get_dict_of_uv_data(args, bl_string, opts.pol, verbose=True)
dataxx = {}
for (i, j) in data.keys():
    dataxx[(i, j)] = data[(i, j)]['xx']
fqs = n.linspace(.1, .2, 1024)
dlys = n.fft.fftshift(n.fft.fftfreq(fqs.size, fqs[1] - fqs[0]))
# gets phase solutions per frequency.
fc = omni.FirstCal(dataxx, fqs, info)
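# A minimal, self-contained sketch of the idea behind delay-based first
# calibration (illustrative only -- omni.FirstCal above is the solver actually
# used here; this helper is not part of it). Two redundant baselines see the
# same sky signal, so the product of one visibility with the conjugate of the
# other isolates the antenna phase differences, and the peak of its delay
# transform gives a coarse relative delay.
import numpy as np

def coarse_relative_delay(vis1, vis2, fqs):
    """Estimate the relative delay (ns) between two redundant visibility spectra."""
    ratio = vis1 * np.conj(vis2)                  # sky term cancels for redundant baselines
    window = np.hanning(fqs.size)                 # taper to suppress sidelobes
    dspec = np.fft.fftshift(np.fft.fft(ratio * window))
    dlys = np.fft.fftshift(np.fft.fftfreq(fqs.size, fqs[1] - fqs[0]))  # ns when fqs are in GHz
    return dlys[np.argmax(np.abs(dspec))]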
#              (64,72),
#              (80,64),
#              (88,64),
#              (96,64),
#              (104,64),
#              (9,64),
#              (22,64),
#              (20,64),
#              (43,64),
#              (53,64),
#              (31,64)]
              ]

aa = a.cal.get_aa(opts.cal, n.array([.150]))
if opts.ex_ants:
    bad_ants = [int(ant) for ant in opts.ex_ants.split(',')]
else:
    bad_ants = []
info = omni.aa_to_info(aa, fcal=True, ex_ants=bad_ants)
reds = flatten_reds(info.get_reds())
print len(reds)

# Read in data here.
ant_string = ','.join(map(str, info.subsetant))
bl_string = ','.join(['_'.join(map(str, k)) for k in reds])
times, data, flags = arp.get_dict_of_uv_data(args, bl_string, opts.pol, verbose=True)
dataxx = {}
wgtsxx = {}
for (i, j) in data.keys():
    dataxx[(i, j)] = data[(i, j)]['xx']  # [0:1,:]
    wgtsxx[(i, j)] = n.logical_not(flags[(i, j)]['xx'])  # [0:1,:]
fqs = n.linspace(.1, .2, 1024)
dlys = n.fft.fftshift(n.fft.fftfreq(fqs.size, fqs[1] - fqs[0]))
    if not os.path.exists(xfile):
        xfile = '%f.xtalk.npz' % jd
        if not os.path.exists(xfile):
            print xfile, 'does not exist. Skipping...'
            continue
    print ' using', xfile
    xtalk = np.load(xfile)
else:
    guess, cnt, xtalk = {}, {}, {}
    if opts.omnimdl:
        # if an omnical file is passed in, load in the mdl
        omnifile = opts.omnimdl % '.'.join(filename.split('/')[-1].split('.')[0:4])
        m, _, mdl, _ = omni.from_npz(omnifile)
        # make info object so that we can get a mapping of mdl bls to all bls.
        aa = a.cal.get_aa(opts.cal, np.array([.150]))
        info = omni.aa_to_info(aa)
        redmapping = {}
        for k in mdl[a.miriad.pol2str[uv['pol']]].keys():
            for gp in info.get_reds():
                if k in gp:
                    for kk in gp:
                        redmapping[kk] = k
    for (uvw, t, (i, j)), d, f in uv.all(raw=True):
        ti = np.where(m['jds'] == t)[0]
        bl = str(a.pol.ijp2blp(i, j, uv['pol']))
        if not guess.has_key(bl):
            guess[bl], cnt[bl] = 0, 0
        if (i, j) in [(97, 80), (80, 97), (72, 96), (96, 72), (43, 88), (88, 43)]:
            if opts.verbose:
                print 'No Model Visibility for {0}'.format((i, j))
            ml = np.zeros_like(d)