Code example #1
def load_other():
    dsets_other = {}
    if opts.Clongtime:
        dsets_final = {}
        for k in dsets:
            firstfile = dsets[k][0]
            dsets_final[k] = glob.glob(
                '/'.join(firstfile.split('/')[:-1]) + '/lst.*' +
                firstfile.split('.')[-1])  #full time range
    else:
        dsets_final = dsets
    for k in dsets_final:
        dsets_other[k] = []
        for file in dsets_final[k]:
            if opts.Cfg:
                dsets_other[k].append(
                    '../../lstbin_fg/' + file.split('/')[1] + '/' +
                    file.split('/')[-1][:-1])  # foreground-containing data
            elif opts.CnoFRF:
                dsets_other[k].append(file[:-1])  #gets rid of 'L' on filename
            elif opts.otherbls != None:
                oldsep = filter(lambda x: 'sep' in x, file.split('/'))[0]
                newsep = oldsep.split('p')[0] + 'p' + opts.otherbls[1:-1]
                dsets_other[k].append(file.replace(oldsep, newsep))
            elif opts.Clongtime != None:
                dsets_other[k].append(file)
    data_dict_other = {}
    flg_dict_other = {}
    conj_dict_other = {}
    lsts_other, data_other, flgs_other = {}, {}, {}
    keys_other = []
    print 'Reading in other set of data to estimate C'
    for k in days:
        lsts_other[k], data_other[k], flgs_other[k] = capo.miriad.read_files(
            dsets_other[k], antstr=antstr, polstr=POL, verbose=True)
        lsts_other[k] = n.array(lsts_other[k]['lsts'])
        for bl in data_other[k]:
            d = n.array(data_other[k][bl][POL])[:, chans] * jy2T  # extract frequency range
            flg = n.array(flgs_other[k][bl][POL])[:, chans]
            key_other = (k, bl, POL)
            keys_other.append(key_other)
            data_dict_other[key_other] = d
            flg_dict_other[key_other] = n.logical_not(flg)
            conj_dict_other[key_other[1]] = conj[key_other[1]]
    ds_other = oqe.DataSet()
    inds = oqe.lst_align(lsts_other)
    # note: the aligned copies are bound to data_dict/flg_dict/lsts; set_data
    # below is still fed the pre-alignment *_other dictionaries
    data_dict, flg_dict, lsts = oqe.lst_align_data(inds,
                                                   dsets=data_dict_other,
                                                   wgts=flg_dict_other,
                                                   lsts=lsts_other)
    ds_other.set_data(dsets=data_dict_other,
                      conj=conj_dict_other,
                      wgts=flg_dict_other)
    return keys_other, ds_other
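The heart of load_other() is the LST-alignment step (oqe.lst_align followed by oqe.lst_align_data), which trims every day's data down to the LST range they all share before the covariance is estimated. The following is a minimal numpy-only sketch of that idea, assuming toy 'even'/'odd' grids; toy_lst_align is a hypothetical illustration, not capo's implementation.

# Illustrative sketch only -- capo.oqe.lst_align / lst_align_data are the real
# implementations, and their internals may differ from this toy version.
import numpy as np

def toy_lst_align(lsts_by_key, tol=1e-4):
    """Keep, per key, only the indices whose LSTs fall in the range common to all keys."""
    start = max(l[0] for l in lsts_by_key.values())
    stop = min(l[-1] for l in lsts_by_key.values())
    inds = {}
    for key, l in lsts_by_key.items():
        inds[key] = np.where((l >= start - tol) & (l <= stop + tol))[0]
    nmin = min(len(v) for v in inds.values())  # trim so every key keeps the same number of samples
    return {key: v[:nmin] for key, v in inds.items()}

lsts = {'even': np.linspace(0.10, 0.20, 11),   # toy LST grids offset by one sample
        'odd': np.linspace(0.11, 0.21, 11)}
data = {key: np.random.randn(11, 4) for key in lsts}        # fake (time, channel) blocks
inds = toy_lst_align(lsts)
aligned = {key: data[key][inds[key]] for key in data}
print(sorted((key, aligned[key].shape) for key in aligned)) # both trimmed to the 10-sample overlap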
Code example #2
File: pspec_oqe_2d.py  Project: adampbeardsley/capo
def load_other():
    dsets_other = {}
    if opts.Clongtime:
        dsets_final = {}
        for k in dsets:
            firstfile = dsets[k][0]
            dsets_final[k] = glob.glob('/'.join(firstfile.split('/')[:-1])+'/lst.*'+firstfile.split('.')[-1]) #full time range
    else: dsets_final = dsets
    for k in dsets_final: 
        dsets_other[k] = []
        for file in dsets_final[k]:
            if opts.Cfg: dsets_other[k].append('../../lstbin_fg/'+file.split('/')[1] + '/' + file.split('/')[-1][:-1]) #fg containing data
            elif opts.CnoFRF: dsets_other[k].append(file[:-1]) #gets rid of 'L' on filename
            elif opts.otherbls != None:
                oldsep = filter(lambda x: 'sep' in x, file.split('/'))[0]
                newsep = oldsep.split('p')[0]+'p'+opts.otherbls[1:-1]
                dsets_other[k].append(file.replace(oldsep,newsep))
            elif opts.Clongtime != None: dsets_other[k].append(file) 
    data_dict_other = {}
    flg_dict_other = {}
    conj_dict_other = {}
    lsts_other,data_other,flgs_other = {},{},{}
    keys_other = []
    print 'Reading in other set of data to estimate C'
    for k in days:
        lsts_other[k],data_other[k],flgs_other[k] = capo.miriad.read_files(dsets_other[k], antstr=antstr, polstr=POL,verbose=True)
        lsts_other[k] = n.array(lsts_other[k]['lsts'])
        for bl in data_other[k]:
            d = n.array(data_other[k][bl][POL])[:,chans] * jy2T  #extract frequency range
            flg = n.array(flgs_other[k][bl][POL])[:,chans]
            key_other = (k,bl,POL)
            keys_other.append(key_other)
            data_dict_other[key_other] = d
            flg_dict_other[key_other] = n.logical_not(flg)
            conj_dict_other[key_other[1]] = conj[key_other[1]]
    ds_other = oqe.DataSet()
    inds = oqe.lst_align(lsts_other)
    data_dict,flg_dict,lsts = oqe.lst_align_data(inds,dsets=data_dict_other,wgts=flg_dict_other,lsts=lsts_other)
    ds_other.set_data(dsets=data_dict_other,conj=conj_dict_other,wgts=flg_dict_other)
    return keys_other, ds_other
Code example #3
File: pspec_oqe_2d.py  Project: SaulAryehKohn/capo
        n_ = make_noise(d, stats[k]['cnt'][:, chans], inttime, sdf)
        flg = n.array(flgs[k][bl][POL])[:, chans] # extract freq range
        key = (k, bl, POL)
        data_dict_v[key] = d
        data_dict_n[key] = n_
        flg_dict[key] = n.logical_not(flg)
        conj_dict[key[1]] = conj[bl]
keys = data_dict_v.keys()
bls_master = []
for key in keys:  # populate list of baselines
    if key[0] == keys[0][0]:
        bls_master.append(key[1])
print 'Baselines:', len(bls_master)

# Align dataset
inds = oqe.lst_align(lsts)
data_dict_v, flg_dict, lsts = oqe.lst_align_data(inds, dsets=data_dict_v,
                                                 wgts=flg_dict, lsts=lsts)
data_dict_n = oqe.lst_align_data(inds, dsets=data_dict_n)[0]
nlst = data_dict_v[keys[0]].shape[0]
# the lsts passed to lst_align_data form a dictionary keyed by 'even', 'odd', etc.

# Save some information
cnt_full = stats[stats.keys()[0]]['cnt'][inds[stats.keys()[0]]]
cnt_full = cnt_full[:, chans]
lsts = lsts[lsts.keys()[0]]
# calculate the effective number of counts used in the data
cnt_eff = 1./n.sqrt(n.ma.masked_invalid(1./cnt_full**2).mean())
# calculate the effective number of baselines given grouping:
N = len(bls_master)
nbls = N
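The cnt_eff line above averages the counts in inverse-square (noise-variance) space rather than linearly, so poorly sampled bins pull the effective count down. A quick numpy check of that formula follows; the counts are made up purely for illustration.

import numpy as np

cnt = np.array([[10., 12., 0.],
                [11., 9., 10.]])              # made-up counts per (time, channel); the 0 mimics a flagged sample
inv_sq = np.ma.masked_invalid(1. / cnt**2)    # 1/0**2 -> inf (numpy warns), which masked_invalid then masks
cnt_eff = 1. / np.sqrt(inv_sq.mean())
print(cnt_eff)      # ~10.3, pulled toward the smaller counts
print(cnt.mean())   # ~8.7, the naive mean, which the flagged 0 drags down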
Code example #4
        d = n.array(data[k][bl][POL])[:, chans] * jy2T  # extract frequency range
        flg = n.array(flgs[k][bl][POL])[:, chans]
        key = (k, bl, POL)
        data_dict[key] = d
        flg_dict[key] = n.logical_not(flg)
        conj_dict[key[1]] = conj[bl]
keys = data_dict.keys()
bls_master = []
for key in keys:  #populate list of baselines
    if key[0] == keys[0][0]: bls_master.append(key[1])
print 'Baselines:', len(bls_master)

#Align and create dataset
ds = oqe.DataSet(lmode=LMODE)
inds = oqe.lst_align(lsts)
# the lsts passed in form a dictionary keyed by 'even', 'odd', etc., but the lsts returned are a single array
data_dict, flg_dict, lsts = oqe.lst_align_data(inds, dsets=data_dict,
                                               wgts=flg_dict, lsts=lsts)

#If data is replaced by noise
if opts.noise_only:
    if opts.same is None and opts.diff is None:
        print 'Need to specify whether noise is the same on all baselines (--same) or different (--diff)'
        sys.exit()
    #Prep FRF Stuff
    ij = bls_master[0]  #ij = (1,4)
    # make sure the FRP will be the same whether or not the baseline is conjugated
    if blconj[a.miriad.ij2bl(ij[0], ij[1])]:
        if ij[0] < ij[1]: