Code Example #1
File: calb.py  Project: vlslv/myPYTHONstuff
def lee4sst(fileplace1,fileplace2,fileplace3,fileplace4):
    """
    Lee archivos '/path/to/file/bi1yymmdd.save' o 
    '/path/to/file/rs1yymmdd.hh00.save' y devuelve un
    vector-tiempo (fds) y la estructura completa de da-
    tos (biors) de dos archivos consecutivos y concatenados.
    """

    pathplusfile1,pathplusfile2,pathplusfile3,pathplusfile4 = fileplace1,fileplace2,fileplace3,fileplace4 
    biors1 = readsav(pathplusfile1,python_dict=False,verbose=False)
    biors2 = readsav(pathplusfile2,python_dict=False,verbose=False)
    biors3 = readsav(pathplusfile3,python_dict=False,verbose=False)
    biors4 = readsav(pathplusfile4,python_dict=False,verbose=False)
    yyyy = int(pathplusfile1[37:39])+2000
    mm = int(pathplusfile1[39:41])
    dd = int(pathplusfile1[41:43])
    obsday = int(datetime(yyyy,mm,dd,0,0).strftime('%s'))
    biors1234= AttrDict({'pm_daz':[],'off':[],'azierr':[],'eleerr':[],'x_off':[],'elepos':[],'azipos':[],'pos_time':[],
    'recnum':[],'opmode':[],'time':[],'pm_del':[],'gps_status':[],'adcval':[],'y_off':[],'target':[]})
    for items in biors1:
        a,b,c,d = biors1[items],biors2[items],biors3[items],biors4[items]
        biors1234[items] = np.concatenate((a,b,c,d))
    thatday = obsday + biors1234['time']/1.e4
    dts = list(map(datetime.fromtimestamp, thatday))
    fds = dates.date2num(dts)
    return fds,biors1234
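A minimal usage sketch with hypothetical paths; note that lee4sst slices the observation date from fixed character positions (37:43) of the first path, so real paths must preserve that layout:

# Hypothetical consecutive SST .save files -- substitute real data.
fds, biors = lee4sst('/path/to/file/rs1140101.1000.save', '/path/to/file/rs1140101.1100.save',
                     '/path/to/file/rs1140101.1200.save', '/path/to/file/rs1140101.1300.save')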
Code Example #2
def cos_lsf_new(lam,version):
    if lam < 1800.0:
        if lam > 1450.0:
            chan ='g160m'
        else:
            chan = 'g130m'
    else:
        chan = 'g225m'

    if (version == 'new' and ((chan == 'g130m') or (chan == 'g160m'))):
        q = readsav('C:/Users/Will Evonosky/Dropbox/SOARS 2016/Programs/H2 Fluoresence Code/cos_lsf_new.idl')
        #print '/Users/Matt/IDLWorkspace80/COS_FIT/cos_lsf_new2.idl'
    else:
        q = readsav('C:/Users/Will Evonosky/Dropbox/SOARS 2016/Programs/H2 Fluoresence Code/cos_lsf.idl')
        #print '/Users/Matt/IDLWorkspace80/COS_FIT/cos_lsf2.idl'

    chan = np.where(chan.encode() == q.lsfchan)  # IDL strings load as bytes in Python 3
    chan = int(chan[0])

    #print np.shape(q.lsf)
    lamind = np.argmin(np.abs(q.lsfwave[chan,:]-lam))
    lsfstart = q.lsf[:,lamind,chan]
    lsf = lsfstart.T

    xarray = lam+q.lsfpix*0.001*q.lsfpixscale[chan]
    return (lsf,xarray)                   
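A short usage sketch; the .idl save paths hard-coded above are machine-specific, so they must exist before this will run (the wavelength below is arbitrary):

lsf, xarray = cos_lsf_new(1309.0, 'new')  # 1309 A falls in the g130m channel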
Code Example #3
File: misc.py  Project: YSOVAR/YSOVAR
def read_cluster_grinder(filepath):
    ''' Import Rob's Spitzer data

    Read Rob's IDL format and make it into a catalog,
    deleting multiple columns and adding identifiers.
    
    Parameters
    ----------
    filepath : string
        Path to a directory that holds the output of the ClusterGrinder
        pipeline. All files need to have standard names.
        Specifically, this routine reads:
        
            - ``cg_merged_srclist_mips.sav``
            - ``cg_classified.sav``

    Returns
    -------
    cat : astropy.table.Table
        Table with 2MASS and Spitzer magnitudes and the ClusterGrinder
        classification.
    '''
    s = readsav(os.path.join(filepath, 'cg_merged_srclist_mips.sav'))
    coo=np.ma.array(s.out[:,0:20],mask=(s.out[:,0:20] == 0.))
    s.out[:,20:30][np.where(s.out[:,20:30] < -99)] = np.nan
    s.out[:,30:40][np.where(s.out[:,30:40]==10)] = np.nan
    
    dat = Table()
    dat.add_column(Column(name='RA', data=np.ma.mean(coo[:,[0,2,4,12,14,16,18]],axis=1), unit = 'deg', format = '9.6g'))   
    #RA is the average of all valid (non-zero) RA values in 2MASS JHK, IRAC 1-4
    dat.add_column(Column(name='DEC', data=np.ma.mean(coo[:,[1,3,5,13,15,17,19]],axis=1), unit='deg', format='+9.6g'))

    robsyukyformat={'J_MAG': 20,'H_MAG': 21, 'K_MAG': 22,'J_ERR': 30,
                    'H_ERR': 31,'K_ERR': 32,'IRAC_1': 26,'IRAC_2': 27, 
                    'IRAC_3': 28, 'IRAC_4': 29,'IRAC_1_ERR':36,'IRAC_2_ERR':37,
                    'IRAC_3_ERR':38, 'IRAC_4_ERR':39}
    for col in robsyukyformat:
        dat.add_column(Column(name=col, data=s.out[:, robsyukyformat[col]], unit='mag', format='4.2g'))

    s.mips[:,2][np.where(s.mips[:,2] == -100)] = np.nan
    s.mips[:,3][np.where(s.mips[:,3] == 10)] = np.nan
    dat.add_column(Column(name='MIPS', data=s.mips[:,2], unit='mag', format='4.2g'))
    dat.add_column(Column(name='MIPS_ERR',data=s.mips[:,3], unit='mag', format='4.2g'))

    IRclass = readsav(os.path.join(filepath, 'cg_classified.sav'))
    dat.add_column(Column(name='IRclass', dtype='|S5', length=len(dat)))
    for n1, n2 in zip(['wdeep', 'w1','w2','wtd','w3'], ['I*', 'I', 'II', 'II*', 'III']):
        if n1 in IRclass:
            dat['IRclass'][IRclass[n1]] = n2
    dat.add_column(Column(name='AK', data=IRclass.ak, unit='mag', format='4.2g'))
   
    return dat
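A hypothetical call, assuming a directory that holds the two standard-named .sav files:

cat = read_cluster_grinder('/path/to/clustergrinder/output/')
cat.write('cluster_catalog.fits', overwrite=True)  # standard astropy Table I/O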
Code Example #4
File: cut_bin_ps.py  Project: jpober/brownscripts
def ps_cut(power_2d,power_diff,bin_file=None,thresh=1e7,kperp_min_cut=0.003,kperp_max_cut=0.05,kpara_min_cut=0.11,kpara_max_cut=1.08,coarse_band_extent=2):
    d = readsav(power_2d)
    diff = readsav(power_diff)
    kperp = d['kperp_edges']
    kx = diff['kx_mpc']
    ky = diff['ky_mpc']
    kz = diff['kz_mpc']
    h = diff['hubble_param']
    k = np.zeros((kx.size,kx.size))
    for ii in range(kx.size):
        for jj in range(ky.size):
            k[jj][ii]=np.sqrt(kx[ii]**2+ky[jj]**2)
    k=k.flatten()
#dat = organize_power(d,'power_3d')
#dif = organize_power(diff,'power_diff')
    if bin_file is None:
        print "find pixels using model"
        q = np.logical_and(d['power']<thresh,d['power']>0)
    else: 
        fbin = readsav(bin_file)
        q = np.array(fbin['bin_1to2d']).astype(bool)
    r = np.zeros((kz.size,kx.size*ky.size),dtype=bool)
    n = np.digitize(k,kperp)
    q[:,np.where(kperp[:-1] > kperp_max_cut)[0]] = False
    q[:,np.where(kperp[1:] < kperp_min_cut)[0]] = False
    q[np.where(kz > kpara_max_cut)[0]] = False
    q[np.where(kz < kpara_min_cut)[0]] = False
    q[:,12] = False
    q[:,13] = False
    q[:,21] = False
    for ii in range(coarse_band_extent):
        q[24+ii::24] = False
        q[24-ii::24] = False
    for nn in range(kperp.size-1):  
        if kperp[nn] > kperp_max_cut: continue
        if kperp[nn] < kperp_min_cut: continue          
        for zi in range(kz.size): 
            if kz[zi] < kpara_min_cut: continue
            if kz[zi] > kpara_max_cut: continue
            if q[zi][nn]:             
                ind0 = np.where(n==nn+1)
                r[zi][ind0[0]] = True
#    return r
    dif = diff['power_diff']*(h**3)
    wgt = diff['weight_diff']
    #wgt /= np.sum(wgt)
    r = r.reshape(dif.shape)   
    ind = np.where(r)
    cut = dif[ind]#*(wgt[ind]>10.*np.mean(wgt))
    #n0ind = np.nonzero(cut)
    kzpower = np.divide(np.sum(dif*wgt*r,axis=(1,2)),np.sum(wgt*r,axis=(1,2)))
    return q,d['power']*(h**3), cut, wgt[ind], kzpower, kz/h
Code Example #5
File: cut_ps_v2.py  Project: jpober/brownscripts
def plotlimit(f1,f2,sz=(10,10)):
    fig,axs=plt.subplots(3,2,sharex=True,sharey=True,figsize=sz)  
    deor = readsav('/users/wl42/IDL/FHD/catalog_data/eor_power_1d.idlsave')
    keor = deor['k_centers']
    peor = deor['power'] 
    z=['7.1','6.8','6.5'] 
    pol=['E-W','N-S']
    for ii in range(6): 
        ix=ii//2
        iy=ii%2
        fb=f1[ii]
        fl=f2[ii] 
        d=readsav(fl) 
        h=d['hubble_param']
        k0=keor/h 
        p0=peor*keor**3/2/np.pi**2 
        kb,pb,pbup,sb=get_1d_limit(fb)
        kl,pl,plup,sl=get_1d_limit(fl)
        pl[np.where(pl==0)]=np.nan
        axs[ix][iy].set_xlim(0.15,1.2) 
        axs[ix][iy].set_ylim(10,1e7)
        axs[ix][iy].set_title('z='+z[ix]+' '+pol[iy])
        axs[ix][iy].set_xscale('log')
        axs[ix][iy].set_yscale('log') 
        m1,m2,m3=None,None,None 
        if iy==0: axs[ix][iy].set_ylabel('$\Delta^2$ ($mK^2$)')
        if ix==2: axs[ix][iy].set_xlabel('$k$ ($h$ $Mpc^{-1}$)')
        if ii==1: 
            m1='Fiducial Theory'
            m2='2016 2-$\sigma$ Upper Limit' 
            m3='2016 Noise Level' 
        axs[ix][iy].step(k0,p0,where='mid',c='r',label=m1) 
        axs[ix][iy].step(kb,pbup,where='mid',c='c',label=m2)
        axs[ix][iy].step(kb,sb,where='mid',c='c',linestyle='--',label=m3)
        l1,l2,l3=None,None,None
        if ii==0:
            l1='Measured Power' 
            l2='Noise Level'
            l3='2-$\sigma$ Upper Limit'
        axs[ix][iy].step(kl,pl,where='mid',c='k',label=l1) 
        axs[ix][iy].step(kl,sl,where='mid',c='k',linestyle='--',label=l2) 
        axs[ix][iy].step(kl,plup,where='mid',c='indigo',label=l3)
        axs[ix][iy].fill_between(xtostep(kl),ytostep(pl)-2*ytostep(sl),ytostep(plup),color='silver',alpha=0.8)
        axs[ix][iy].grid(axis='y') 
    plt.subplots_adjust(top=0.94,bottom=0.12,left=0.07,right=0.965,hspace=0.21,wspace=0.005)
    axs[0][0].legend(loc=2,ncol=3,fontsize='x-small')
    axs[0][1].legend(loc=2,ncol=3,fontsize='x-small')
    plt.show()
Code Example #6
File: test_idl.py  Project: hitej/meta-core
    def test_arrays_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)

        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.d.shape, (4, 3, 2))

        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.a[i, j, k],
                                           np.array([1, 2, 3], dtype=np.int16))
                    assert_array_identical(s.arrays_rep.b[i, j, k],
                                           np.array([4., 5., 6., 7.],
                                                    dtype=np.float32))
                    assert_array_identical(s.arrays_rep.c[i, j, k],
                                           np.array([np.complex64(1+2j),
                                                     np.complex64(7+8j)]))
                    assert_array_identical(s.arrays_rep.d[i, j, k],
                                           np.array([b"cheese", b"bacon", b"spam"],
                                                    dtype=object))
Code Example #7
File: test_idl.py  Project: hitej/meta-core
 def test_arrays(self):
     s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
     assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
     assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
     assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
     assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
     assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
Code Example #8
File: dopcode1d.py  Project: benmontet/gearbox
def get_positions():
    pos = np.zeros((700, 2))
    d = readsav('infohTEST_rj157.261') 
    for i in range(700):
        print(i, end=' ')
        pos[i,1] = d.infoarr[i]['pixel']
        pos[i,0] = d.infoarr[i]['order']
    return pos
Code Example #9
File: figure_sptxhfi.py  Project: zhnhou/make_figures
def restore_save(savfile):

    n = readsav(savfile)
    key = list(n.keys())
    if (len(key) != 1):
        exit(".sav file is not a combined end2end file")

    num_bands = int(n[key[0]]['num_bands'][0])
    bands = n[key[0]]['bands'][0]
    dbs_data = n[key[0]]['dbs_data_combined'][0]
    dbs_sims = n[key[0]]['dbs_sims_combined'][0] # (nsims, nspecs, nbands)

    winminell = int(n[key[0]]['winminell'][0])
    winmaxell = int(n[key[0]]['winmaxell'][0])

    winfunc_data = n[key[0]]['winfunc_data_combined'][0]
    winfunc_sims = n[key[0]]['winfunc_sims_combined'][0]

    cov_sv    = n[key[0]]['cov_sv_combined'][0]
    cov_noise = n[key[0]]['cov_noise_combined'][0]

    d = {'num_bands':num_bands, 'bands':bands, 
         'dbs_data':dbs_data, 'dbs_sims':dbs_sims,
         'winminell':winminell, 'winmaxell':winmaxell,
         'winfunc_data':winfunc_data, 'winfunc_sims':winfunc_sims,
         'cov_sv':cov_sv, 'cov_noise':cov_noise}

    return d
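A usage sketch with a hypothetical combined end2end .sav file:

spec = restore_save('end2end_combined.sav')
print(spec['num_bands'], spec['dbs_sims'].shape)  # dbs_sims is (nsims, nspecs, nbands)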
Code Example #10
def idlToPandas(fileName, keyValue=0):
    """PURPOSE: To restore an IDL strcture contained
    within an IDL save file and add it to a pandas
    data frame."""
    idlSavedVars = readsav(fileName)

    #check if the keyValue passed in is actually an index
    #rather than the keyValue name:
    if valIsNumber(keyValue):
        keys = list(idlSavedVars.keys())
        keyValue = keys[keyValue]

    struct = idlSavedVars[keyValue]
    tags = []
    for tag in struct.dtype.descr:
        tags.append(tag[0][0])

    #now take care of potential big-endian/little-endian issues
    dt = struct.dtype
    dt = dt.descr
    for i in range(len(dt)):
        if(dt[i][1][0] == '>' or dt[i][1][0] == '<'):
            dt[i] = (dt[i][0], dt[i][1][1:])
    struct = struct.astype(dt)

    pdf = pd.DataFrame.from_records(struct, columns=tags)
    return pdf
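Here valIsNumber is a helper defined elsewhere in the same module. A call sketch with hypothetical file and structure names:

pdf = idlToPandas('observations.sav', 'obsstruct')
print(pdf.columns.tolist())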
Code Example #11
File: test_idl.py  Project: hitej/meta-core
    def test_arrays_replicated_3d(self):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                    message="warning: multi-dimensional structures")
            s = readsav(path.join(DATA_PATH,
                                  'struct_pointer_arrays_replicated_3d.sav'),
                        verbose=False)

        # Check column types
        assert_(s.arrays_rep.g.dtype.type is np.object_)
        assert_(s.arrays_rep.h.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.h.shape, (4, 3, 2))

        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.g[i, j, k],
                            np.repeat(np.float32(4.), 2).astype(np.object_))
                    assert_array_identical(s.arrays_rep.h[i, j, k],
                            np.repeat(np.float32(4.), 3).astype(np.object_))
                    assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0])))
                    assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0])))
Code Example #12
File: sigio.py  Project: alexmorrisak/davitpy
def readsav(radar,date,time,param,bandLim,procType,dataDir):
    """
    *******************************

    dataObj = readsav(radar,date,time,param,bandlim)

    INPUTS:
    OUTPUTS:

    Written by Nathaniel 14AUG2012
    *******************************
    """

    from scipy.io.idl import readsav
    import numpy as np
    import os.path
    import sys

    dateSt = str(date[0])
    timeSt = '.'.join(["%s" % el for el in time])
    bandLim = np.array(bandLim) * 1.e6
    bandSt = '-'.join(["%i" % el for el in bandLim])

    a = [dateSt,radar,param,bandSt,procType,'sav']
    fileName = '.'.join(a)

    path = '/'.join([dataDir,fileName])

    if os.path.exists(path):
        dataObj = readsav(path)
        return dataObj
    else:
        print(' '.join([fileName,'does not exist.']))
        sys.exit()
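Note that this module-level readsav deliberately shadows the scipy readsav it imports inside the function body. A call sketch with hypothetical arguments:

dataObj = readsav('bks', [20120814], [14, 0], 'power', [10.0, 12.0], 'fft', '/data/sig')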
Code Example #13
File: field_topology.py  Project: irbdavid/celsius
def interpolate_along_trajectory(latitude, longitude, description=None, fname=None):
    """Longitudes are necessarily positive east, as in the idlsaveile"""
    if latitude.shape != longitude.shape:
        raise ValueError('Shape mismatch')

    if not fname:
        fname = get_file()

    data = readsav(fname, verbose=False)

    inx_lat = (np.floor(latitude) + 90).astype(int)
    inx_lon = np.floor(((longitude % 360.) + 360.) % 360.).astype(int)

    if description:
        img = data[description]

        return img[inx_lat, inx_lon]

    else:
        output = dict()

        for d in list(data.keys()):
            if 'percent' not in d: continue

            output[d] = data[d][inx_lat, inx_lon]

        return output
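A toy trajectory; 'closed_percent' is a hypothetical key for one of the percentage maps in the save file:

lat = np.array([10.0, 20.0, 30.0])
lon = np.array([45.0, 90.0, 135.0])  # positive east, as the docstring requires
frac = interpolate_along_trajectory(lat, lon, description='closed_percent')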
Code Example #14
File: test_idl.py  Project: Arasz/scipy
def test_null_pointer():
    """
    Regression test for null pointers.
    """
    s = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
    assert_identical(s.point, None)
    assert_identical(s.check, np.int16(5))
Code Example #15
File: wyl.py  Project: jpober/brownscripts
def load_gains_fhd(fhdsav):
    fhd_cal = readsav(fhdsav,python_dict=True)
    gfhd = {'x':{},'y':{}}
    for a in range(fhd_cal['cal']['N_TILE'][0]):
        gfhd['x'][a] = fhd_cal['cal']['GAIN'][0][0][a]
        gfhd['y'][a] = fhd_cal['cal']['GAIN'][0][1][a]
    return gfhd
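A usage sketch (hypothetical cal.sav path); each entry is the complex gain versus frequency for one tile:

gfhd = load_gains_fhd('/data/fhd/1061316296_cal.sav')
gx0 = gfhd['x'][0]  # X-polarization gains for tile 0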
Code Example #16
File: idl_save.py  Project: piScope/piScope
def load_matfile(obj):
    file=obj.path2fullpath(modename=modename,
                           pathname=pathname)
    if file == '': return
    ### clean first
    for name, child in obj.get_children():
        child.destroy()

    def idl2dict(dd, cls = collections.OrderedDict):
       r = cls()
       for name in dd.keys():
           print(name)
           print(isinstance(dd[name], np.recarray))
           if isinstance(dd[name], np.recarray):
               r[name] = rec2dict(dd[name], cls=cls)
           else:
               r._var0[name] = dd[name]
       return r 
    
    nm0 = readsav(file)
    if nm0 is None: 
       nm0 = IDLfile()
    else:
       nm0 = idl2dict(nm0, cls = IDLfile)
    nm = IDLfile();nm['data'] = nm0
    obj.setvar0(nm)
Code Example #17
def main():
    masks_cat = 'megapipe'
    ###make sure to change these when running in a new environment!###
    #location of data directory
    filepath = '/scratch/dac29/output/processed_data/'+masks_cat+'/masks/'
    #save data to directory...
    savepath = '/scratch/dac29/output/processed_data/'+masks_cat+'/masks/'
    #################################################################

    fields = ['W1','W2','W3','W4']

    field = fields[3]
    print('opening', field, 'masks...')
    s=readsav(filepath+field+'_masks'+'.dat')
    keys = s.keys()
    name = list(keys)[0]
    masks = s[name][0]

    #column names
    print(masks.dtype.names)
    print(type(masks['ra']))

    #number of masks
    print(len(masks['size']))

    filename = field+'_masks'
    np.save(savepath+filename, masks)
Code Example #18
File: dopcode1d.py  Project: benmontet/gearbox
def get_iodine(wmin, wmax):
    file = 'ftskeck50.sav'
    sav = readsav(file)
    wav = sav.w
    iod = sav.s
    use = np.where((wav >= wmin) & (wav <= wmax))
    return wav[use], iod[use]
Code Example #19
File: test_idl.py  Project: annapowellsmith/scipy
 def test_idict(self):
     custom_dict = {"a": np.int16(999)}
     original_id = id(custom_dict)
     s = readsav(path.join(DATA_PATH, "scalar_byte.sav"), idict=custom_dict, verbose=False)
     assert_equal(original_id, id(s))
     assert_("a" in s)
     assert_identical(s["a"], np.int16(999))
     assert_identical(s["i8u"], np.uint8(234))
Code Example #20
File: test_idl.py  Project: hitej/meta-core
 def test_scalars_replicated_3d(self):
     s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
     assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
      assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
     assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
Code Example #21
File: test_idl.py  Project: BranYang/scipy
    def test_arrays_corrupt_idl80(self):
        # test byte arrays with missing nbyte information from IDL 8.0 .sav file
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "Not able to verify number of bytes from header")
            s = readsav(path.join(DATA_PATH,'struct_arrays_byte_idl80.sav'),
                        verbose=False)

        assert_identical(s.y.x[0], np.array([55,66], dtype=np.uint8))
Code Example #22
File: test_idl.py  Project: AlgorithmFan/scipy
    def test_arrays_corrupt_idl80(self):
        # test byte arrays with missing nbyte information from IDL 8.0 .sav file
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            s = readsav(path.join(DATA_PATH,'struct_arrays_byte_idl80.sav'),
                        verbose=False)

        assert_identical(s.y.x[0], np.array([55,66], dtype=np.uint8))
Code Example #23
File: test_idl.py  Project: hitej/meta-core
 def test_scalars(self):
     s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
     assert_identical(s.scalars.a, np.array(np.int16(1)))
     assert_identical(s.scalars.b, np.array(np.int32(2)))
     assert_identical(s.scalars.c, np.array(np.float32(3.)))
     assert_identical(s.scalars.d, np.array(np.float64(4.)))
      assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
     assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
Code Example #24
File: test_idl.py  Project: annapowellsmith/scipy
 def test_scalars_replicated(self):
     s = readsav(path.join(DATA_PATH, "struct_scalars_replicated.sav"), verbose=False)
     assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
     assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
     assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.0), 5))
     assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.0), 5))
      assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
     assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.0 + 3j), 5))
Code Example #25
File: dopcode1d.py  Project: benmontet/gearbox
def get_info():
    d = readsav('infohTEST_rj157.261') 
    pre = '/Users/ozymandias1/research/gearbox/testdata/'
    for i in range(700):
        print(i, end=' ')
        info = d.infoarr[i]
        filename = pre+'test_'+str(i)
        np.save(filename, info)
Code Example #26
File: test_idl.py  Project: hitej/meta-core
 def test_idict(self):
     custom_dict = {'a': np.int16(999)}
     original_id = id(custom_dict)
     s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False)
     assert_equal(original_id, id(s))
     assert_('a' in s)
     assert_identical(s['a'], np.int16(999))
     assert_identical(s['i8u'], np.uint8(234))
Code Example #27
File: kpi.py  Project: benjaminpope/pysco
    def from_mf(self, file, maskname, array_name=""):
        ''' Creation of the KerPhase_Relation object from a matched filter file.

        ----------------------------------------------------------------
        This duplicates the functionality of from_coord_file for masking data.

        Input is a matched filter idlvar file. 
        ---------------------------------------------------------------- '''

        mfdata = readsav(file)

        maskdata = readsav(maskname)

        self.mask = maskdata['xy_coords']
        self.nbh  = mfdata.n_holes   # number of sub-Ap
        print('nbuv = ', mfdata.n_baselines)
        self.nbuv = mfdata.n_baselines

        ndgt = 6 # number of digits of precision for rounding
        prec = 10**(-ndgt)

        # ================================================
        # Create a kpi representation of the closure phase
        # operator
        # ================================================
        
        self.uv = np.zeros((self.nbuv,2))

        self.uv[:,0] = mfdata.u
        self.uv[:,1] = mfdata.v

        print(self.uv.shape)

        # 2. Calculate the transfer matrix and the redundancy vector
        # --------------------------------------------------------------
        self.RED = np.ones(self.nbuv, dtype=float)       # Redundancy

        self.nkphi  = mfdata.n_bispect # number of Ker-phases

        self.KerPhi = np.zeros((self.nkphi, self.nbuv)) # allocate the array
        self.TFM = self.KerPhi # assuming a non-redundant array!

        for k in range(0,self.nkphi):
            yes = mfdata.bs2bl_ix[k,:]
            self.KerPhi[k,yes] = 1
Code Example #28
File: sub.py  Project: prayash22/p3dthon
def readsave(restore_fname):
    if restore_fname[restore_fname.rfind('.'):] == '.npy':
        cr = np.load(restore_fname).all()
        for v in ['ni', 'ne', 'niav', 'neav']:
            if v in cr:
                cr['de'+v] = cr[v]
        return cr
    else:
        return readsav(restore_fname)
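The same call then handles both NumPy and IDL dumps (filenames hypothetical):

cr = readsave('run42.npy')  # dict stored via np.save; gains 'de'-prefixed density aliases
cr = readsave('run42.sav')  # anything else falls through to scipy's readsav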
Code Example #29
 def test_compressed(self):
     s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
     assert_identical(s.i8u, np.uint8(234))
     assert_identical(s.f32, np.float32(-3.1234567e+37))
     assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
     assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
     assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
     assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
     assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
      assert_identical(s.arrays.d[0], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=object))
Code Example #30
File: lowrdx_skyspec.py  Project: ntejos/PYPIT
def main(args):
    from scipy.io.idl import readsav
    from linetools.spectra.xspectrum1d import XSpectrum1D

    # Read
    lrdx_sky = readsav(args.lowrdx_sky)
    # Generate
    xspec = XSpectrum1D.from_tuple((lrdx_sky['wave_calib'], lrdx_sky['sky_calib']))
    # Write
    xspec.write_to_fits(args.new_file)
Code Example #31
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_int32(self):
     s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
     assert_identical(s.i32s, np.int32(-1234567890))
Code Example #32
def main():
    """
    The main function
    """
    #nx_quad = 1056 # For Tempo
    #ny_quad = 1046 # For Tempo
    #nlat = nx_quad*2
    #nspec = ny_quad*2


    #outlier_mask = read_outlier_mask()
    file_path = r'F:\TEMPO\Data\GroundTest\FPS\FPA_Gain_vs_Temp'
    save_dir_local_image = r'C:\Users\nmishra\Workspace\TEMPO\Storage_region_analysis\Image_Sto_Comparisons\Image'
    save_dir_local_sto = r'C:\Users\nmishra\Workspace\TEMPO\Storage_region_analysis\Image_Sto_Comparisons\Storage'
     
    save_dir_local = [save_dir_local_image, save_dir_local_sto]

    temperature_files = [each for each in os.listdir(file_path) \
                        if each.endswith('_Darks')]

    for k in range(3, len(temperature_files)):


        image_dark_files = os.path.join(file_path, temperature_files[k],
                                        'Script_Data', 'saved_quads')

        sto_dark_files = os.path.join(file_path, temperature_files[k],
                                        'Dark_Imaging', 'saved_quads')

        all_dark_files = [image_dark_files, sto_dark_files]

        for num_files in range(1, len(all_dark_files)):
            all_int_time = []
            all_med_quad_A = []
            all_med_quad_B = []
            all_med_quad_C = []
            all_med_quad_D = []
            all_std_quad_A = []
            all_std_quad_B = []
            all_std_quad_C = []
            all_std_quad_D = []
            all_tsoc_image = []
            all_dark_current_image = []
            all_tsoc_storage = []
            all_dark_current_storage = []

            dframe1 = pd.DataFrame()
            all_int_files_image = [each for each in os.listdir(all_dark_files[num_files]) \
                                 if each.endswith('.dat.sav')]

            nominal_int_files = [items for items in all_int_files_image ]
            save_dir = os.path.join(save_dir_local[num_files], temperature_files[k])
            if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
            #print(save_dir)
            for data_files in nominal_int_files[2:]:
                   
                    data_path_name_split = data_files.split('_')
                    if num_files == 0:
                        int_time = round(int(data_path_name_split[-1].split('.')[0]))
                        int_time = int(int_time)/1000
                    elif num_files == 1:
                        int_time =  data_path_name_split[4][0:3]
                       # print(int_time[0])
                        if 's' in int_time:
                            int_time = int(int_time[0])*1000

                        else:
                            int_time = int(int_time)
                    #print('integ. time= ', int_time)
                    data_file = os.path.join(all_dark_files[num_files], data_files)
                    print(data_file)
                    print(int_time)
                    IDL_variable = readsav(data_file)
                    all_full_frame = IDL_variable.q
                    all_int_time.append(int_time)
                    quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']
                    for i in range(0, 4): # 4 quads
                        quad_name = quads[i]          
                        quad = all_full_frame[:, i, :, :]
                        active_quad = np.mean(quad[:, 4:1028, 10:1034], axis=0)
                        tsoc = np.mean(quad[:, 4:1028, 1034:1056], axis=0)
                        bias_subtracted_quad = perform_bias_subtraction(active_quad, tsoc)
                        smear_subtracted_quad =  perform_smear_subtraction(bias_subtracted_quad, int(int_time))
                        #create_image(smear_subtracted_quad/int_time, title='a', figure_name='b')
                        if num_files == 0:
                            title = 'Image Region Dark Current Histogram\n' + str(quad_name) + ', Int. Time = '+ str(int_time) + ' ms, '+ 'Temp = ' + str(temperature_files[k][0:4])
                            COLOR ='blue'                    
                        elif num_files == 1:
                            title = 'Storage Region Dark Current Histogram\n' + str(quad_name) + ', Int. Time = '+ str(int_time) + ' ms, '+ 'Temp = ' + str(temperature_files[k][0:4])
                            COLOR='red'
                        figure_name = save_dir +'/' + quad_name.replace(" ","") + '_' + str(int(int_time)) + 'ms_hist_dark_current' 
                        
                        
                        
                        
                        # calculate the dark current rates
                        if int(int_time)==0:
                            smear_subtracted_quad = smear_subtracted_quad
                        else:
                            smear_subtracted_quad = smear_subtracted_quad/int(int_time)
                        
                       
                        create_hist(smear_subtracted_quad, title, figure_name, COLOR)
                        unct = 10*np.std(filter_outlier_median(smear_subtracted_quad))/np.median(filter_outlier_median(smear_subtracted_quad))
                        if i == 0:
                            all_med_quad_A.append(np.mean(filter_outlier_median(smear_subtracted_quad)))
                            all_std_quad_A.append(unct)
                                           
                            
                        elif i == 1:
                            all_med_quad_B.append(np.mean(filter_outlier_median(smear_subtracted_quad)))
                            all_std_quad_B.append(unct)

                        elif i == 2:
                            all_med_quad_C.append(np.mean(filter_outlier_median(smear_subtracted_quad)))
                            all_std_quad_C.append(unct)

                        else:
                            all_med_quad_D.append(np.mean(filter_outlier_median(smear_subtracted_quad)))
                            all_std_quad_D.append(unct)

                        
#                        if num_files == 0: 
#                             all_tsoc_image.append(tsoc)
#                             all_dark_current_image.append(smear_subtracted_quad)
#                            
#                        elif num_files==1:
#                             all_tsoc_storage.append(tsoc)
#                             all_dark_current_storage.append(smear_subtracted_quad)
#                        
#                        active_quad = None
#                        bias_subtracted_quad = None
#                        smear_subtracted_quad = None
#
#            
#            plot_few_tsocs(all_tsoc_image, all_tsoc_storage, )
            
            dframe1 = pd.DataFrame(
                            {'Int_time.' : all_int_time,
                             'Avg_Quad_A' : all_med_quad_A,
                             'Avg_Quad_B' : all_med_quad_B,
                             'Avg_Quad_C' : all_med_quad_C,
                             'Avg_Quad_D' : all_med_quad_D,
                             'Var_Quad_A': all_std_quad_A,
                             'Var_Quad_B': all_std_quad_B,
                             'Var_Quad_C': all_std_quad_C,
                             'Var_Quad_D': all_std_quad_D,


                             })
Code Example #33
File: fhd_cal.py  Project: JIANSHULI/pyuvdata
    def read_fhd_cal(self,
                     cal_file,
                     obs_file,
                     settings_file=None,
                     raw=True,
                     extra_history=None,
                     run_check=True,
                     check_extra=True,
                     run_check_acceptability=True):
        """
        Read data from an FHD cal.sav file.

        Args:
            cal_file: The cal.sav file to read from.
            obs_file: The obs.sav file to read from.
            settings_file: The settings_file to read from. Optional, but very
                useful for provenance.
            raw: Option to use the raw (per antenna, per frequency) solution or
                to use the fitted (polynomial over phase/amplitude) solution.
                Default is True (meaning use the raw solutions).
            extra_history: Optional string or list of strings to add to the
                object's history parameter. Default is None.
            run_check: Option to check for the existence and proper shapes of
                parameters after reading in the file. Default is True.
            check_extra: Option to check optional parameters as well as required
                ones. Default is True.
            run_check_acceptability: Option to check acceptable range of the values of
                parameters after reading in the file. Default is True.
        """

        this_dict = readsav(cal_file, python_dict=True)
        cal_data = this_dict['cal']

        this_dict = readsav(obs_file, python_dict=True)
        obs_data = this_dict['obs']

        self.Nspws = 1
        self.spw_array = np.array([0])

        self.Nfreqs = int(cal_data['n_freq'][0])
        self.freq_array = np.zeros((self.Nspws, len(cal_data['freq'][0])),
                                   dtype=np.float64)
        self.freq_array[0, :] = cal_data['freq'][0]
        self.channel_width = float(np.mean(np.diff(self.freq_array)))

        # FHD only calculates one calibration over all the times.
        # cal_data.n_times gives the number of times that goes into that one
        # calibration, UVCal.Ntimes gives the number of separate calibrations
        # along the time axis.
        self.Ntimes = 1
        time_array = obs_data['baseline_info'][0]['jdate'][0]
        self.integration_time = np.round(
            np.mean(np.diff(time_array)) * 24 * 3600, 2)
        self.time_array = np.array([np.mean(time_array)])

        self.Njones = int(cal_data['n_pol'][0])
        # FHD only has the diagonal elements (jxx, jyy) and if there's only one
        # present it must be jxx
        if self.Njones == 1:
            self.jones_array = np.array([-5])
        else:
            self.jones_array = np.array([-5, -6])

        self.telescope_name = obs_data['instrument'][0]

        self.Nants_data = int(cal_data['n_tile'][0])
        self.Nants_telescope = int(cal_data['n_tile'][0])
        self.antenna_names = np.array(cal_data['tile_names'][0].tolist())
        self.antenna_numbers = np.arange(self.Nants_telescope)
        self.ant_array = np.arange(self.Nants_data)

        self.set_sky()
        self.sky_field = 'phase center (RA, Dec): ({ra}, {dec})'.format(
            ra=obs_data['orig_phasera'][0], dec=obs_data['orig_phasedec'][0])
        self.sky_catalog = cal_data['skymodel'][0]['catalog_name'][0]
        self.ref_antenna_name = cal_data['ref_antenna_name'][0]
        self.Nsources = int(cal_data['skymodel'][0]['n_sources'][0])
        self.baseline_range = [
            float(cal_data['min_cal_baseline'][0]),
            float(cal_data['max_cal_baseline'][0])
        ]

        galaxy_model = cal_data['skymodel'][0]['galaxy_model'][0]
        if galaxy_model == 0:
            galaxy_model = None
        else:
            galaxy_model = 'gsm'

        diffuse_model = cal_data['skymodel'][0]['diffuse_model'][0]
        if diffuse_model == '':
            diffuse_model = None
        else:
            diffuse_model = os.path.basename(diffuse_model)

        if galaxy_model is not None:
            if diffuse_model is not None:
                self.diffuse_model = galaxy_model + ' + ' + diffuse_model
            else:
                self.diffuse_model = galaxy_model
        elif diffuse_model is not None:
            self.diffuse_model = diffuse_model

        self.gain_convention = 'divide'
        self.x_orientation = 'east'

        self.set_gain()
        fit_gain_array_in = cal_data['gain'][0]
        fit_gain_array = np.zeros(self._gain_array.expected_shape(self),
                                  dtype=np.complex128)
        for jones_i, arr in enumerate(fit_gain_array_in):
            fit_gain_array[:, 0, :, 0, jones_i] = arr
        if raw:
            res_gain_array_in = cal_data['gain_residual'][0]
            res_gain_array = np.zeros(self._gain_array.expected_shape(self),
                                      dtype=np.complex128)
            for jones_i, arr in enumerate(res_gain_array_in):
                res_gain_array[:, 0, :, 0, jones_i] = arr
            self.gain_array = fit_gain_array + res_gain_array
        else:
            self.gain_array = fit_gain_array

        # FHD doesn't really have a chi^2 measure. What it has is a convergence measure.
        # The solution converged well if this is less than the convergence
        # threshold ('conv_thresh' in extra_keywords).
        self.quality_array = np.zeros_like(self.gain_array, dtype=np.float64)
        convergence = cal_data['convergence'][0]
        for jones_i, arr in enumerate(convergence):
            self.quality_array[:, 0, :, 0, jones_i] = arr

        # array of used frequencies (1: used, 0: flagged)
        freq_use = obs_data['baseline_info'][0]['freq_use'][0]
        # array of used antennas (1: used, 0: flagged)
        ant_use = obs_data['baseline_info'][0]['tile_use'][0]
        # array of used times (1: used, 0: flagged)
        time_use = obs_data['baseline_info'][0]['time_use'][0]

        time_array_use = time_array[np.where(time_use > 0)]
        self.time_range = [np.min(time_array_use), np.max(time_array_use)]

        # Currently this can't include the times because the flag array
        # dimensions has to match the gain array dimensions. This is somewhat artificial...
        self.flag_array = np.zeros_like(self.gain_array, dtype=np.bool_)
        flagged_ants = np.where(ant_use == 0)[0]
        for ant in flagged_ants:
            self.flag_array[ant, :] = 1
        flagged_freqs = np.where(freq_use == 0)[0]
        for freq in flagged_freqs:
            self.flag_array[:, :, freq] = 1

        # currently don't have branch info. may change in future.
        self.git_origin_cal = 'https://github.com/EoRImaging/FHD'
        self.git_hash_cal = obs_data['code_version'][0]

        self.extra_keywords['autoscal'] = \
            '[' + ', '.join(str(d) for d in cal_data['auto_scale'][0]) + ']'
        self.extra_keywords['nvis_cal'] = cal_data['n_vis_cal'][0]
        self.extra_keywords['time_avg'] = cal_data['time_avg'][0]
        self.extra_keywords['cvgthres'] = cal_data['conv_thresh'][0]
        if 'DELAYS' in obs_data.dtype.names:
            if obs_data['delays'][0] is not None:
                self.extra_keywords['delays'] = \
                    '[' + ', '.join(str(int(d)) for d in obs_data['delays'][0]) + ']'

        if not raw:
            self.extra_keywords['polyfit'] = cal_data['polyfit'][0]
            self.extra_keywords['bandpass'] = cal_data['bandpass'][0]
            self.extra_keywords['mode_fit'] = cal_data['mode_fit'][0]
            self.extra_keywords['amp_deg'] = cal_data['amp_degree'][0]
            self.extra_keywords['phse_deg'] = cal_data['phase_degree'][0]

        if settings_file is not None:
            self.history, self.observer = get_fhd_history(settings_file,
                                                          return_user=True)
        else:
            warnings.warn('No settings file, history will be incomplete')
            self.history = ''

        if extra_history is not None:
            if isinstance(extra_history, (list, tuple)):
                self.history += '\n' + '\n'.join(extra_history)
            else:
                self.history += '\n' + extra_history

        if not uvutils.check_history_version(self.history,
                                             self.pyuvdata_version_str):
            if self.history.endswith('\n'):
                self.history += self.pyuvdata_version_str
            else:
                self.history += '\n' + self.pyuvdata_version_str

        if run_check:
            self.check(check_extra=check_extra,
                       run_check_acceptability=run_check_acceptability)
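A hedged usage sketch, assuming this method lives on pyuvdata's UVCal class (filenames hypothetical):

from pyuvdata import UVCal

cal = UVCal()
cal.read_fhd_cal('1061316296_cal.sav', '1061316296_obs.sav',
                 settings_file='1061316296_settings.txt')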
Code Example #34
File: pls_test.py  Project: freesiemens/Working
import numpy
from sklearn.metrics import mean_squared_error
from sklearn.cross_decomposition import PLSRegression
from scipy.io.idl import readsav
import csv
import matplotlib.pyplot as plot
import pickle

#db_spectra=genfromtxt(r'C:\Users\rbanderson\Documents\MSL\ChemCam\PDL\PL'
#'S\PLS1_20130829\database_input\cleanroom_3m_recal_newir.csv',delimiter=',')

db = readsav(
    r'C:\Users\rbanderson\IDLWorkspace82\PLS1_fold\database_input\Spectra_1600mm_LANL_indexed.sav'
)
db_spectra = db['database_spectra'][0][0]
db_std = db['database_spectra'][0][1]
wave = db['database_spectra'][0][2]
db_spect_index = numpy.array((db['database_spectra'][0][3]), dtype='int')

db_std_index = numpy.char.add(db_std.astype(str), db_spect_index.astype(str))

db_comps = readsav(
    r'C:\Users\rbanderson\IDLWorkspace82\PLS1_fold\database_input\database_comps_majors_20140304.sav'
)
db_comp_names = db_comps['ccam_std'][0][0]
db_ox_comp = db_comps['ccam_std'][0][1]
db_ox_list = db_comps['ccam_std'][0][2]

#wave=genfromtxt(r'C:\Users\rbanderson\Documents\MSL\ChemCam\PDL\PLS\PLS1'
#'_20130829\database_input\cleanroom_3m_recal_newir_wave.csv')
#
Code Example #35
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_uint16(self):
     s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
     assert_identical(s.i16u, np.uint16(65511))
Code Example #36
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_int64(self):
     s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
     assert_identical(s.i64s, np.int64(-9223372036854774567))
Code Example #37
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_1d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'),
                 verbose=False)
     assert_equal(s.array1d.shape, (123, ))
Code Example #38
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_3d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'),
                 verbose=False)
     assert_equal(s.array3d.shape, (11, 22, 12))
     assert_true(np.all(s.array3d == np.float32(4.)))
     assert_true(np.all(vect_id(s.array3d) == id(s.array3d[0, 0, 0])))
Code Example #39
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_5d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'),
                 verbose=False)
     assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
     assert_true(np.all(s.array5d == np.float32(4.)))
     assert_true(np.all(vect_id(s.array5d) == id(s.array5d[0, 0, 0, 0, 0])))
Code Example #40
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_byte(self):
     s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
     assert_identical(s.i8u, np.uint8(234))
Code Example #41
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_int16(self):
     s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
     assert_identical(s.i16s, np.int16(-23456))
Code Example #42
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_bytes(self):
     s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
     assert_identical(
         s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
Code Example #43
File: test_scratch.py  Project: acpaquette/PyHAT
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 07 12:29:20 2014

@author: rbanderson
"""

import glob

import matplotlib.pyplot as plot
import numpy
from libpyhat.spectral.baseline_code import ccam_remove_continuum
from scipy.io.idl import readsav

filelist = glob.glob(r"E:\ChemCam\Calibration Data\LANL_testbed\Caltargets\*.SAV")
data = readsav(filelist[0])
muv = data['calibspecmuv']
muv_orig = muv
x = numpy.arange(len(muv))

# muv_denoise,muv_noise=ccam_denoise.ccam_denoise(muv,sig=3,niter=4)
# plot.figure()
# plot.plot(muv_noise)

test = ccam_remove_continuum.ccam_remove_continuum(x, muv, 5, 2, 2)
plot.figure()
plot.plot(test)
plot.plot(muv_orig)
cont = muv_orig - test
print(cont[0:20])
plot.plot(cont)
pass
Code Example #44
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_3d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'),
                 verbose=False)
     assert_equal(s.array3d.shape, (11, 22, 12))
Code Example #45
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_1d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'),
                 verbose=False)
     assert_equal(s.array1d.shape, (123, ))
     assert_true(np.all(s.array1d == np.float32(4.)))
     assert_true(np.all(vect_id(s.array1d) == id(s.array1d[0])))
Code Example #46
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_inheritance(self):
     s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
     assert_identical(s.fc.x, np.array([0], dtype=np.int16))
     assert_identical(s.fc.y, np.array([0], dtype=np.int16))
     assert_identical(s.fc.r, np.array([0], dtype=np.int16))
     assert_identical(s.fc.c, np.array([4], dtype=np.int16))
Code Example #47
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_uint64(self):
     s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
     assert_identical(s.i64u, np.uint64(18446744073709529285))
Code Example #48
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_8d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'),
                 verbose=False)
     assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
Code Example #49
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_uint32(self):
     s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
     assert_identical(s.i32u, np.uint32(4294967233))
Code Example #50
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_7d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'),
                 verbose=False)
     assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
Code Example #51
def main():
    """
    The main function
    """
    #nx_quad = 1056 # For Tempo
    #ny_quad = 1046 # For Tempo
    #nlat = nx_quad*2
    #nspec = ny_quad*2
    file_path = r'C:\Users\nmishra\Workspace\TEMPO\Data\GroundTest\FPA_Gain_Vs_Temp\-20C_PT_Light\Script_Data\saved_quads'
    data_path_all = [
        each for each in os.listdir(file_path)
        if each.endswith('_19981.dat.sav')
    ]
    #file_to_check = random.choice(data_path_all)
    #file_to_check = r'SPI_20160916022643568_PT_VS_TEMPERATURE_-20C_LIGHT_210_FT6_NOM_INT_99999.dat.sav'
    file_to_check = data_path_all[0]
    print(file_to_check)
    #cc
    data_path_name_split = file_to_check.split('_')
    int_time = round(int(
        data_path_name_split[-1].split('.')[0]))  #for integ_sweep
    #temp = data_path_name_split[5]
    #print(temp)
    data_file = os.path.join(file_path, file_to_check)
    IDL_variable = readsav(data_file)
    all_quads_frame = IDL_variable.q
    # let's perform the bias subtraction first
    quad_D_all_frame = all_quads_frame[:, 3, :, :]
    active_quad_D = quad_D_all_frame[:, 4:1028, 10:1034]
    bias_val_D = quad_D_all_frame[:, 4:1028, 1034:1056]
    avg_bias_D = np.mean(bias_val_D, axis=2)
    bias_subtracted_quad_D = active_quad_D - avg_bias_D[:, :, None]

    # let's work on quad D as it has least number of outliers
    active_quad_D_avg = np.mean(np.array(bias_subtracted_quad_D), axis=0)

    # let us plot the histogram of the variance of each of the active pixels
    label = 'Quad D'
    variance_all_active_pixels = np.std(bias_subtracted_quad_D, axis=0)

    nx_quad, ny_quad = variance_all_active_pixels.shape
    plt.figure(figsize=(10, 8))
    plt.hist(np.reshape(variance_all_active_pixels, (nx_quad * ny_quad, 1)),
             30,
             density=False,
             facecolor='magenta',
             alpha=1)
    title = 'Histogram of Variance of all active pixels (' + 'int.time = ' + str(
        int_time) + ' microsecs)'
    plt.title(title, fontsize=14, fontweight="bold")
    plt.grid(True, linestyle=':')
    plt.xlabel('Temporal Noise (Variance)', fontsize=14, fontweight="bold")
    plt.ylabel('Frequency', fontsize=14, fontweight="bold")
    ax = plt.gca()
    #plt.xlim(0, 300)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
    plt.show()
    save_dir = r'C:\Users\nmishra\Workspace\TEMPO\Data\GroundTest\FPA_Bias_Vs_Temp\-18C_Darks_Data\Script_Data\saved_quads\test_variance_0_int'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    plt.savefig(save_dir + '/' + 'all_variance.png', dpi=100)

    # now lets' plot the histograms of three cases.
    # a. average of quad D
    # b. random quad D from 100 frames
    # c. All 100 frames

    frames = randint(0, 9)
    quad_D_hist = [
        active_quad_D_avg, active_quad_D[frames, :, :], active_quad_D
    ]
    texts_use = [
        'Average of 100 frames', 'Single frame' + str(frames), 'All 100 Frames'
    ]
    face_color = ['red', 'blue', 'green']
    for i in range(0, 3):
        if quad_D_hist[i].ndim == 3:
            ndims, nx_quad, ny_quad = quad_D_hist[i].shape
        else:
            ndims = 1
            nx_quad, ny_quad = quad_D_hist[i].shape
        quad = quad_D_hist[i]

        outlier_filtered_data = filter_outlier_median(quad, ndims, nx_quad,
                                                      ny_quad)
        label = str(file_to_check)
        plt.figure(num=i, figsize=(10, 8))
        plt.hist(outlier_filtered_data,
                 50,
                 density=False,
                 facecolor=face_color[i],
                 alpha=0.75,
                 label=label)
        plt.legend(loc='best')
        title = 'Histograms of Quad D (' + texts_use[i] + ')'
        plt.title(title, fontsize=14, fontweight="bold")
        plt.grid(True, linestyle=':')
        #plt.xlim(800, 860)
        plt.xlabel('DN', fontsize=14, fontweight="bold")
        plt.ylabel('Frequency', fontsize=14, fontweight="bold")
        ax = plt.gca()
        #ax.get_xaxis().get_major_formatter().set_scientific(False)
        ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
        plt.savefig(save_dir + '/' + texts_use[i] + '.png', dpi=100)
        plt.close('all')
Code Example #52
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_6d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'),
                 verbose=False)
     assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
Code Example #53
def main():
    """Loads the LOWCAT catalogue,
    fixes some data formats,
    then starts creating the plots for the paper"""
    # Load data
    #============================

    # Load the .sav file
    savfile = readsav(CAT_FOLDER + LOWCAT_FILE)

    # Fix some of the data into a format that is more suitable
    outstr = fix_data(savfile['outstr'])
    df = pd.DataFrame(outstr)

    # Calculate flare duration
    df['FL_DURATION'] = calculate_flare_duration(df['FL_STARTTIME'],
                                                 df['FL_ENDTIME'])
    df['COR2_TS'] = pd.to_datetime(df['COR2_TS'],
                                   format='%d-%b-%Y %H:%M:%S.%f')
    df['COR2_TF'] = pd.to_datetime(df['COR2_TF'],
                                   format='%d-%b-%Y %H:%M:%S.%f')
    df['COR2_DURATION'] = calculate_flare_duration(df['COR2_TS'],
                                                   df['COR2_TF'])

    # Load Jordan's FLARECAST data
    csvdata = pd.read_csv(CAT_FOLDER + FLARECAST_FILE)

    # Load location info
    locdata = readsav(CAT_FOLDER + LOC_FILE)

    # Create AR and flare location figure
    #============================
    ar_flare_locations(locdata)

    # Create Appendix histograms
    #============================
    cf.set_config_file(offline=False, world_readable=True, theme='pearl')

    # CME and flare properties
    df['FL_GOES'] = np.log10(df['FL_GOES'].astype('float64'))
    df_cme_hists = df[['COR2_WIDTH', 'COR2_V', 'FL_GOES', 'FL_DURATION']]
    df_cme_hists.iplot(kind='histogram',
                       subplots=True,
                       shape=(2, 2),
                       filename='cmeflare_hist',
                       histnorm='percent')

    # SMART properties
    df_smart_hists = df[[
        'SMART_TOTAREA', 'SMART_TOTFLX', 'SMART_BMIN', 'SMART_BMAX',
        'SMART_PSLLEN', 'SMART_BIPOLESEP', 'SMART_RVALUE', 'SMART_WLSG'
    ]]
    df_smart_hists.iplot(kind='histogram',
                         subplots=True,
                         shape=(4, 2),
                         filename='smart_hist',
                         histnorm='percent')

    # SHARP properties
    df_flarecast_hists_sharp = csvdata[[
        'total (FC data.sharp kw.usiz)', 'max (FC data.sharp kw.usiz)',
        'ave (FC data.sharp kw.ushz)', 'max (FC data.sharp kw.ushz)',
        'total (FC data.sharp kw.usflux)', 'max (FC data.sharp kw.jz)',
        'max (FC data.sharp kw.hgradbh)'
    ]]
    df_flarecast_hists_sharp.iplot(kind='histogram',
                                   subplots=True,
                                   shape=(4, 2),
                                   filename='fcast_hist_sharp',
                                   histnorm='percent')

    # Other FLARECAST properties
    df_flarecast_hists = csvdata[[
        'Value Int', 'R Value Br Logr', 'Ising Energy', 'Abs Tot Dedt',
        'Tot L Over Hmin', 'Alpha'
    ]]
    df_flarecast_hists.iplot(kind='histogram',
                             subplots=True,
                             shape=(3, 2),
                             filename='fcast_hist',
                             histnorm='percent')

    # Main figures
    #============================

    # Figure 6: SRS area vs GOES flux with Hale Class
    srs_area_complexity(df=df)

    # Figure 7 top: GOES flux and WLSG vs CME speed. Colourbar shows angular width (halo)
    plotly_double(x1data=np.log10(df['FL_GOES'].astype('float64')),
                  x1title='GOES Flux [Wm-2]',
                  x2data=df['SMART_WLSG'].astype('float64'),
                  x2title='WLsg [G/Mm]',
                  y1data=df['COR2_V'].astype('float64'),
                  y1title='CME Speed [ms<sup>-1</sup>]',
                  y1range=[0., 2000.],
                  weightdata='10',
                  colourdata=df['COR2_WIDTH'].astype('float64'),
                  colourdata_title='CME width [<sup>o</sup>]',
                  colourdata_max=360,
                  colourdata_min=0,
                  colourdata_step=90,
                  filedata='halo_cme_properties_new_colorscale',
                  colourscale=[[0, 'rgb(54,50,153)'], [0.25, 'rgb(54,50,153)'],
                               [0.25, 'rgb(17,123,215)'],
                               [0.5, 'rgb(17,123,215)'],
                               [0.5, 'rgb(37,180,167)'],
                               [0.75, 'rgb(37,180,167)'],
                               [0.75, 'rgb(249,210,41)'],
                               [1.0, 'rgb(249,210,41)']])  #'Viridis'

    # Figure 7 bottom: Bmin.max, Total area and flux, PSL length, and R value vs CME speed. Colours show flare class.
    plotly_multi(x1data=np.log10(np.abs(df['SMART_BMIN'].astype('float64'))),
                 x1title='Bmin [G]',
                 x2data=np.log10(df['SMART_BMAX'].astype('float64')),
                 x2title='Bmax [G]',
                 x3data=np.log10(df['SMART_TOTAREA'].astype('float64')),
                 x3title='Total area [m.s.h]',
                 x4data=np.log10(df['SMART_TOTFLX'].astype('float64')),
                 x4title='Total flux [Mx]',
                 x5data=df['SMART_RVALUE'].astype('float64'),
                 x5title='R value [Mx]',
                 x6data=df['SMART_WLSG'].astype('float64'),
                 x6title='WLsg [G/Mm]',
                 y1data=np.log10(df['COR2_V'].astype('float64')),
                 y1title='CME Speed [kms<sup>-1</sup>]',
                 y1range=[2, 3.2],
                 weightdata='10',
                 colourdata=np.log10(df['FL_GOES'].astype('float64')),
                 colourdata_title='GOES Flux [Wm-2]',
                 colourdata_max=-3,
                 colourdata_min=-7,
                 colourdata_step=1,
                 filedata='smart_properties',
                 colourscale=[[0, 'rgb(54,50,153)'], [0.25, 'rgb(54,50,153)'],
                              [0.25, 'rgb(17,123,215)'],
                              [0.5, 'rgb(17,123,215)'],
                              [0.5, 'rgb(37,180,167)'],
                              [0.75, 'rgb(37,180,167)'],
                              [0.75, 'rgb(249,210,41)'],
                              [1.0, 'rgb(249,210,41)']])
Code Example #54
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_5d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'),
                 verbose=False)
     assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
Code Example #55
File: plotz.py  Project: lrpatrick/rsg-janal
from scipy.io.idl import readsav

import sys
sys.path.append("/home/lee/Work/RSG-JAnal/bin/.")

import numpy as np
import matplotlib.pyplot as plt

import contfit
import resolution as res


def trimspec(w1, w2, s2):
    """Trim s2 and w2 to match w1"""
    roi = np.where((w2 > w1.min()) & (w2 < w1.max()))[0]
    return w2[roi], s2[roi]


mod = readsav(
    '../models/MODELSPEC_2013sep12_nLTE_R10000_J_turb_abun_grav_temp-int.sav')

grid = mod['modelspec'][0][0]
par = mod['modelspec'][0][1]
wave = mod['modelspec'][0][2]

n6822 = np.genfromtxt('../../ngc6822/Spectra/N6822-spec-24AT.v2-sam.txt')
nspec = n6822[:, 1:] / np.median(n6822[:, 1:])
owave, ospec = trimspec(wave, n6822[:, 0], nspec)

mssam = contfit.specsam(wave, grid, owave)
mdeg = res.degrade(owave, mssam, 10000, 3000)

# ############################################################################
# vary-micro:
f, ax = plt.subplots(2, 2, figsize=(12, 12))
Code Example #56
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_4d(self):
     s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'),
                 verbose=False)
     assert_equal(s.array4d.shape, (4, 5, 8, 7))
Code Example #57
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_float64(self):
     s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
     assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
Code Example #58
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_float32(self):
     s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
     assert_identical(s.f32, np.float32(-3.1234567e+37))
Code Example #59
File: test_idl.py  Project: melshaer/MiniBloq-Sparki
 def test_complex32(self):
     s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'),
                 verbose=False)
     assert_identical(s.c32, np.complex64(3.124442e13 - 2.312442e31j))
コード例 #60