Example #1
def mass_flux_plot(*args,**kwargs):
    fltm = idls.read(args[0])
    injm = idls.read(args[1])
    f1 = plt.figure()

    ax1 = f1.add_subplot(211)
    plt.plot(injm.nt_sc,injm.nmf_rscale,'r')
    plt.plot(injm.nt_sc,injm.nmf_zscale,'b')
    plt.plot(injm.nt_sc,injm.nmf_z0scale,'k')
    plt.plot(injm.nt_sc,(injm.nmf_rscale+injm.nmf_zscale),'g')
    plt.axis([0.0,160.0,0.0,3.5e-5])
    plt.minorticks_on()
    locs,labels = plt.yticks()
    plt.yticks(locs, ["%.1f" % x for x in locs*1e5])
    plt.text(0.0, 1.03, r'$10^{-5}$', transform = plt.gca().transAxes)
    plt.xlabel(r'Time [yr]',labelpad=6)
    plt.ylabel(r'$\dot{\rm M}_{\rm out} [ \rm{M}_{\odot} \rm{yr}^{-1} ]$',labelpad=15)
    
    ax2 = f1.add_subplot(212)
    plt.plot(fltm.nt_sc,fltm.nmf_rscale,'r')
    plt.plot(fltm.nt_sc,fltm.nmf_zscale,'b')
    plt.plot(fltm.nt_sc,fltm.nmf_z0scale,'k')
    plt.plot(fltm.nt_sc,(fltm.nmf_rscale+fltm.nmf_zscale),'g')
    plt.axis([0.0,160.0,0.0,4.0e-5])
    plt.minorticks_on()
    locs,labels = plt.yticks()
    plt.yticks(locs, ["%.1f" % x for x in locs*1e5])
    plt.text(0.0, 1.03, r'$10^{-5}$', transform = plt.gca().transAxes)
    plt.xlabel(r'Time [yr]',labelpad=6)
    plt.ylabel(r'$\dot{\rm M}_{\rm out} [ \rm{M}_{\odot} \rm{yr}^{-1} ]$',labelpad=15)
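
# Note on the tick trick above: multiplying the tick locations by 1e5 and writing
# a 10^-5 factor above the axis emulates matplotlib's scale-offset label by hand.
# A minimal equivalent sketch using matplotlib.ticker.FuncFormatter (assuming an
# existing Axes object `ax`):
from matplotlib.ticker import FuncFormatter

ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: "%.1f" % (y * 1e5)))
ax.text(0.0, 1.03, r'$10^{-5}$', transform=ax.transAxes)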
Example #2
def photoz(s1100,e1100=0.,s14=0.,e14=0.,ntry=50000):
    '''
    Determine the photometric redshift of a galaxy given the
    measured 1.4 cm and 1100 micron flux and uncertainty
    '''
    z = np.arange(0,10,.05)
    ngal = 44
    if s14 == 0:
        ratioin = -1
        ratiosig = -1
    else:
        ratioin = s1100/s14
        ratiosig = (e1100/s1100**2+e14/s14**2)**.5
    a = idlsave.read('fluxratio1100.sav')
    dat = a.get('data')
    zs = a.get('redshift')
    averatio = np.zeros(200)
    sigma = np.zeros(200)
    array = np.random.randn(ntry)
    array1 = np.random.randn(ntry)
    if s14 <= 0.:
        ydarts = (s1100+array*e1100)/(np.abs(array1*e14))
    else:
        ydarts = array*ratiosig+ratioin
    xdarts = np.zeros(ntry)
    for i in range(ntry):
        jrangal = int(np.floor(ngal*np.random.rand()))
        testtrack = dat[:,jrangal]
        yval = ydarts[i]
        xdarts[i] = np.interp(yval,testtrack,z)
    return xdarts,ydarts
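
# Usage sketch for photoz (hypothetical flux values; assumes 'fluxratio1100.sav'
# is present in the working directory):
#   xdarts, ydarts = photoz(s1100=5.0, e1100=0.5, s14=0.05, e14=0.005)
#   z_est = np.median(xdarts)  # summarize the Monte Carlo redshift draws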
Example #3
    def __init__(self,infile):
        mypath=os.getcwd()
        if mypath.find('Users') > -1:
            print("Running on Rose's mac pro or laptop")

        elif mypath.find('home') > -1:
            print("Running on coma")


        cefile=idlsave.read(infile)
        self.nulnu_iras25=cefile['nulnu_iras25']
        self.nulnu_iras100=cefile['nulnu_iras100']
        self.nulnu_iras12=cefile['nulnu_iras12']
        self.nulnu_iras60=cefile['nulnu_iras60']
        self.nulnuinlsun=cefile['nulnuinlsun']
        self.lir_sanders=cefile['lir_sanders']
        self.lir=cefile['lir']
        self.nulnu_lw3=cefile['nulnu_lw3']
        self.nulnu_lw2=cefile['nulnu_lw2']
        self.lamb=cefile['lambda']
        #
        # convert all to double-precision arrays
        #
        self.lamb=array(self.lamb,'d')
        self.nulnu_iras25=array(self.nulnu_iras25,'d')
        self.nulnu_iras100=array(self.nulnu_iras100,'d')
        self.nulnu_iras12=array(self.nulnu_iras12,'d')
        self.nulnu_iras60=array(self.nulnu_iras60,'d')
        self.nulnuinlsun=array(self.nulnuinlsun,'d')
        self.lir_sanders=array(self.lir_sanders,'d')
        self.lir=array(self.lir,'d')
        self.nulnu_lw3=array(self.nulnu_lw3,'d')
        self.nulnu_lw2=array(self.nulnu_lw2,'d')
Example #4
 def test_arrays(self):
     s = idlsave.read(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
     assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
     assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
     assert_true(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
     assert_true(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
     assert_true(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
Example #5
 def test_scalars(self):
     s = idlsave.read(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
     assert_identical(s.scalars.a, np.array(np.int16(1)))
     assert_identical(s.scalars.b, np.array(np.int32(2)))
     assert_identical(s.scalars.c, np.array(np.float32(3.)))
     assert_identical(s.scalars.d, np.array(np.float64(4.)))
     assert_identical(s.scalars.e, np.array(asbytes_nested(["spam"]), dtype=object))
     assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
Example #6
 def test_scalars_replicated_3d(self):
     s = idlsave.read(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
     assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.e, np.repeat(asbytes("spam"), 24).reshape(4, 3, 2).astype(object))
     assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
Example #7
 def test_idict(self):
     custom_dict = {'a': np.int16(999)}
     original_id = id(custom_dict)
     s = idlsave.read(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False)
     assert_equal(original_id, id(s))
     assert_true('a' in s)
     assert_identical(s['a'], np.int16(999))
     assert_identical(s['i8u'], np.uint8(234))
Example #8
 def test_compressed(self):
     s = idlsave.read(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
     assert_identical(s.i8u, np.uint8(234))
     assert_identical(s.f32, np.float32(-3.1234567e+37))
     assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
     assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
     assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
     assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
     assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
     assert_identical(s.arrays.d[0], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=object))
Example #9
def read_alfalfa_file(filename):
    """
    Read the contents of a whole ALFALFA source file
    """
    savfile = idlsave.read(filename)

    source_dict = dict([(name,read_alfalfa_source(savfile,ii)) for ii,name in
        enumerate(savfile.src.SRCNAME)])

    return source_dict
Example #10
def load_data(obsname):
    """
    load data file given an obs date/number 
    
    Example
    -------
    >>> data = load_data('gal_010.47+00.03/130820_ob3')
    """
    filename = obsname+"_band%ii_clean_music_20130815_map.sav"
    data = {k:idlsave.read(filename % k, verbose=False) for k in range(4)}
    return data
Example #11
 def _read_raw(self):
     """
     Reads the raw data from the retrieval virtual or real file
     """
     f = self.retrieval.read_data(self.unique_id)
     # Load wav files
     k = idlsave.read(f, verbose=False)['k']
     self.flux = k.f[0]
     self.timestamps = k.t[0]
     self.length = len(self.flux)
     return self.flux
Example #12
    def __init__(self, infile):
        a = idlsave.read(infile)
        if infile.find('agctotal') > -1:
            a.agc = a.agctotal
        elif infile.find('agcnorthminus1') > -1:
            a.agc = a.agcnorthminus1
        elif infile.find('agcnorth') > -1:
            a.agc = a.agcnorth

        self.outfile = infile.split('.sav')[0] + '.fits'
        self.agcnumber = a.agc.agcnumber[0]
        self.which = a.agc.which[0]
        self.rah = array(a.agc.rah[0], 'f')
        self.ram = array(a.agc.ram[0], 'f')
        self.ras10 = array(a.agc.ras10[0], 'f')
        self.radeg = zeros(len(self.ras10), 'd')
        self.radeg = 15. * (self.rah + self.ram / 60. +
                            self.ras10 / 10. / 3600.)
        self.decd = array(a.agc.decd[0], 'f')
        self.decm = array(a.agc.decm[0], 'f')
        self.decs = array(a.agc.decs[0], 'f')
        self.decdeg = zeros(len(self.ras10), 'd')
        self.decdeg = self.decd + self.decm / 60. + self.decs / 3600.
        self.a100 = a.agc.a100[0]
        self.b100 = a.agc.b100[0]
        self.mag10 = a.agc.mag10[0]
        self.inccode = a.agc.inccode[0]
        self.posang = a.agc.posang[0]
        self.description = a.agc.description[0]
        self.bsteintype = a.agc.bsteintype[0]
        self.vopt = a.agc.vopt[0]
        self.verr = a.agc.verr[0]
        self.extrc3 = a.agc.extrc3[0]
        self.extdirbe = a.agc.extdirbe[0]
        self.vsource = a.agc.vsource[0]
        self.ngcic = a.agc.ngcic[0]
        self.flux100 = a.agc.flux100[0]
        self.rms100 = a.agc.rms100[0]
        self.v21 = a.agc.v21[0]
        self.width = a.agc.width[0]
        self.widtherr = a.agc.widtherr[0]
        self.telcode = a.agc.telcode[0]
        self.detcode = a.agc.detcode[0]
        self.hisource = a.agc.hisource[0]
        self.statuscode = a.agc.statuscode[0]
        self.snratio = a.agc.snratio[0]
        self.ibandqual = a.agc.ibandqual[0]
        self.ibandsrc = a.agc.ibandsrc[0]
        self.irasflag = a.agc.irasflag[0]
        self.icluster = a.agc.icluster[0]
        self.hidata = a.agc.hidata[0]
        self.iposition = a.agc.iposition[0]
        self.ipalomar = a.agc.ipalomar[0]
        self.rc3flag = a.agc.rc3flag[0]
Example #13
 def test_pointers_replicated_3d(self):
     s = idlsave.read(path.join(DATA_PATH,
                                'struct_pointers_replicated_3d.sav'),
                      verbose=False)
     assert_identical(
         s.pointers_rep.g,
         np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_))
     assert_identical(
         s.pointers_rep.h,
         np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_))
     assert_true(
         np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
Example #14
def stellar_grid():
    #read in stellar parameters
    star_pars = idlsave.read('fit_sed_grid_seds_av025_rv05.sav')
    #get logg, teff grid
    grav_vals = star_pars.grid_seds['logg']
    temp_vals = star_pars.grid_seds['logt'] 
    #find non-zero elements (i.e., have models)
    gtindxs = np.where(temp_vals > 0.)
    temp_vals = temp_vals[gtindxs]
    n_gravs = len(grav_vals)
    ggindxs = np.where(grav_vals[1:n_gravs-1] > 0.)
    grav_vals = [grav_vals[0], (grav_vals[1:n_gravs-1])[ggindxs]]
Example #15
def read_gbt_nh3(ret_idl=False):
    """
    Read BGPS GBT NH3 observation and temperature fit catalog. Citation:
    Dunham et al. (2011), Rosolowsky et al. (in prep.).

    Parameters
    ----------
    ret_idl : Boolean, default False
        Return list of record arrays from IDL save file

    Returns
    -------
    gbt_nh3 : pandas.DataFrame
        Output catalog in a pandas DataFrame object
    idl_list : list
        IDL save file products
    """
    if not isinstance(ret_idl, bool):
        raise TypeError('ret_idl must be Boolean')
    import idlsave
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        nh3_1 = idlsave.read(d.cat_dir + d.gbt_nh3_1_filen, verbose=False)
        nh3_2 = idlsave.read(d.cat_dir + d.gbt_nh3_2_filen, verbose=False)
        nh3_3 = idlsave.read(d.cat_dir + d.gbt_nh3_3_filen, verbose=False)
    idl_list = [nh3_1, nh3_2, nh3_3]
    nh3_1 = _pd.DataFrame(nh3_1.s)
    nh3_2 = _pd.DataFrame(nh3_2.s)
    nh3_3 = _pd.DataFrame(nh3_3.s)
    df_list = [nh3_1, nh3_2, nh3_3]
    gbt_nh3 = _pd.concat(df_list, ignore_index=True)
    gbt_nh3['SNR11'] = gbt_nh3['PK11'] / gbt_nh3['NOISE11']
    gbt_nh3['SNR22'] = gbt_nh3['PK22'] / gbt_nh3['NOISE22']
    gbt_nh3['SNR33'] = gbt_nh3['PK33'] / gbt_nh3['NOISE33']
    if ret_idl:
        return gbt_nh3, idl_list
    else:
        return gbt_nh3
Example #16
def read_alfalfa_source(savfile, sourcenumber=0):
    """
    Create an Observation Block class for a single source in an ALFALFA
    'source' IDL save file
    """

    if type(savfile) is str and ".src" in savfile:
        savfile = idlsave.read(savfile)
    
    src = savfile.src[sourcenumber]

    header = pyfits.Header()

    splist = []
    for spectra in src.spectra:
        for par in spectra.dtype.names:
            try:
                len(spectra[par])
            except TypeError:
                header[par[:8]] = spectra[par]

        # assume ALFALFA spectra in Kelvin
        header['BUNIT'] = 'K'

        xarr = pyspeckit.spectrum.units.SpectroscopicAxis(spectra.velarr,
                refX=header['RESTFRQ'], refX_units='MHz', unit='km/s')

        data = np.ma.masked_where(np.isnan(spectra.spec), spectra.spec)

        sp = pyspeckit.Spectrum(xarr=xarr, data=data, header=header)

        # the Source has a baseline presubtracted (I think)
        sp.baseline.baselinepars = spectra.baseline[::-1]
        sp.baseline.subtracted = True
        sp.baseline.order = len(spectra.baseline)
        sp.baseline.basespec = np.poly1d(sp.baseline.baselinepars)(np.arange(xarr.shape[0]))

        # There are multiple components in each Spectrum, but I think they are not independent
        sp.specfit.fittype = 'gaussian'
        sp.specfit.fitter = sp.specfit.Registry.multifitters['gaussian']
        modelpars = list(zip(spectra['STON'], spectra['VCEN'], spectra['WIDTH']))
        modelerrs = list(zip(spectra['STON'], spectra['VCENERR_STAT'], spectra['WIDTHERR']))
        sp.specfit.modelpars = modelpars[0] # only use the first fit
        sp.specfit.fitter.mpp = modelpars[0]
        sp.specfit.modelerrs = modelerrs[0]
        sp.specfit.fitter.mpperr = modelerrs[0]
        sp.specfit._full_model()

        splist.append(sp)

    return pyspeckit.ObsBlock(splist)
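
# Usage sketch tying the two ALFALFA helpers together (hypothetical file and
# source names):
#   sources = read_alfalfa_file('A1234.src.sav')  # dict of pyspeckit ObsBlocks
#   obsblock = sources['HI123456+12']             # keyed by SRCNAME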
Example #17
 def get_bls_data(self):
     try: 
         bls = idlsave.read(self.get_cached_bls_filename(), verbose=False)['blsstr']
         self.snr = [x.snr for x in bls]
         self.snr_periods = [x.period for x in bls][0]
         self.period = [x.pnew[0] for x in bls][0]
         self.duration = [x.pnew[2] for x in bls][0]
         self.t0 = [x.pnew[1] for x in bls][0]
         self.impact_parameter = [x.pnew[3] for x in bls][0]
         self.limb_darkening = [x.pnew[4] for x in bls][0]
         self.radius_ratio = [x.pnew[6] for x in bls][0]
     except Exception as e:
          logger.warning("Failed to read downloaded file: " + self.get_cached_search_filename() + ' ' + str(e))
     return [self.period, self.t0, self.impact_parameter, self.limb_darkening, None, self.radius_ratio]
Example #18
 def test_arrays(self):
     s = idlsave.read(path.join(DATA_PATH, 'struct_arrays.sav'),
                      verbose=False)
     assert_array_identical(s.arrays.a[0],
                            np.array([1, 2, 3], dtype=np.int16))
     assert_array_identical(s.arrays.b[0],
                            np.array([4., 5., 6., 7.], dtype=np.float32))
     assert_array_identical(
         s.arrays.c[0],
         np.array([np.complex64(1 + 2j),
                   np.complex64(7 + 8j)]))
     assert_array_identical(
         s.arrays.d[0],
         np.array(asbytes_nested(["cheese", "bacon", "spam"]),
                   dtype=object))
Example #19
def test5():
    # When testing on the Python console use:
    # import idlsave
    # s = idlsave.read("Data/20160306rkn.sav")

    # try to read in an IDL .sav file
    cur_path = os.path.dirname(__file__)  # where we are

    s = idlsave.read(os.path.join(cur_path, "Data/20160306rkn.sav"))
    vel = s.fit['v_e']
    beam = s.prm['bmnum']
    freq = s.prm['tfreq']
    print(len(vel))
    print(len(beam))
    print(freq[1:5])
Example #20
def load_data(obsname):
    """
    load data file given an obs date/number 
    
    Example
    -------
    >>> data = load_data('gal_010.47+00.03/130820_ob3')
    """
    if 'coadd' in obsname:
        obsdir, obsname = os.path.split(obsname)
        filename = obsname + "_clean_music_20130815.sav"
        filename = filename[:15] + "%i" + filename[16:]
        filename = os.path.join(obsdir, filename)
    else:
        filename = obsname + "_band%ii_clean_music_20130815_map.sav"
    data = {k: idlsave.read(filename % k, verbose=False) for k in range(4)}
    return data
Example #21
    def __init__(self,starsfile,var='stars'):
        junk = idlsave.read(starsfile)
        self.stars = junk[var]
        self.star = self.stars[0]
        self.fields = np.array(self.stars.dtype.names)
        
        #make fields lowercase
        for i in range(len(self.fields)):
            self.fields[i] = self.fields[i].lower()

        #assign attributes to class
        for field in self.fields:
            if type(self.star[field]) is np.ndarray:
                unpackedfield = savunpack.savunpack(self.stars[field])
                setattr(self, field, unpackedfield)
            else:
                setattr(self, field, self.stars[field])
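
# Design note: setattr assigns the dynamically named attributes directly; it is
# the usual idiom for this pattern and avoids evaluating generated code strings
# via exec.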
Example #22
def load_data(obsname):
    """
    load data file given an obs date/number 
    
    Example
    -------
    >>> data = load_data('gal_010.47+00.03/130820_ob3')
    """
    if 'coadd' in obsname:
        obsdir,obsname = os.path.split(obsname)
        filename = obsname+"_clean_music_20130815.sav"
        filename = filename[:15]+"%i"+filename[16:]
        filename = os.path.join(obsdir,filename)
    else:
        filename = obsname+"_band%ii_clean_music_20130815_map.sav"
    data = {k:idlsave.read(filename % k, verbose=False) for k in range(4)}
    return data
Example #23
 def test_scalars_replicated_3d(self):
     s = idlsave.read(path.join(DATA_PATH,
                                'struct_scalars_replicated_3d.sav'),
                      verbose=False)
     assert_identical(s.scalars_rep.a,
                      np.repeat(np.int16(1), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.b,
                      np.repeat(np.int32(2), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.c,
                      np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
     assert_identical(s.scalars_rep.d,
                      np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
     assert_identical(
         s.scalars_rep.e,
          np.repeat(asbytes("spam"), 24).reshape(4, 3, 2).astype(object))
     assert_identical(
         s.scalars_rep.f,
         np.repeat(np.complex64(-1. + 3j), 24).reshape(4, 3, 2))
Example #24
    def test_arrays_replicated(self):

        s = idlsave.read(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)

        # Check column types
        assert_true(s.arrays_rep.g.dtype.type is np.object_)
        assert_true(s.arrays_rep.h.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.g.shape, (5, ))
        assert_equal(s.arrays_rep.h.shape, (5, ))

        # Check values
        for i in range(5):
            assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_))
            assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_))
            assert_true(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0])))
            assert_true(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0])))
Example #25
def read_grid( model_filepath=None ):
    z = idlsave.read( model_filepath )['mmd']
    mus = np.array( z['mu']  )
    wavs_A_in = z['lam']
    intensities_in = z['flx']    
    nang = len( mus )
    nwav = len( wavs_A_in[0] )
    wavs_nm = wavs_A_in[0]/10.
    intensities = np.zeros( [nwav,nang] )
    for i in range( nang ):
        intensities[:,i] = intensities_in[i]
    # Discard the mu=0 point:
    mus = mus[1:]
    intensities = intensities[:,1:]
    # Reverse order so that mu decreases from 1:
    mus = mus[::-1]
    intensities = intensities[:,::-1]
    # TODO: understand units of these output intensities/fluxes.
    return mus, wavs_nm, intensities
Example #26
 def test_compressed(self):
     s = idlsave.read(path.join(DATA_PATH, 'various_compressed.sav'),
                      verbose=False)
     assert_identical(s.i8u, np.uint8(234))
     assert_identical(s.f32, np.float32(-3.1234567e+37))
     assert_identical(
         s.c64,
         np.complex128(1.1987253647623157e+112 - 5.1987258887729157e+307j))
     assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
     assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
     assert_identical(s.arrays.b[0],
                      np.array([4., 5., 6., 7.], dtype=np.float32))
     assert_identical(
         s.arrays.c[0],
         np.array([np.complex64(1 + 2j),
                   np.complex64(7 + 8j)]))
     assert_identical(
         s.arrays.d[0],
         np.array(asbytes_nested(["cheese", "bacon", "spam"]),
                   dtype=object))
Example #27
def getDistributionStarsGas():
    '''
    Distribution in stars and gas
    '''

    harveyDataDir = \
      '/Users/DavidHarvey/Documents/Work/SIDM_science/clusters/'

    clusters = glob.glob(harveyDataDir+'/*')
    listOfMergerHalos = []
    for iCluster in clusters:
        print(iCluster)
        if os.path.isfile( iCluster+'/bulletsFLUX.sav'):
            
            halos = idl.read(iCluster+'/bulletsFLUX.sav')['halos']
            for iHalo in halos:
                clusterName = iCluster.split('/')[-1]
                iMerger = mergerHalo(clusterName, iHalo)
                iMerger.calculateBulletVectors()
                iMerger.ouputDistSG('Harvey15distances.dat')
                listOfMergerHalos.append(iMerger)


    sys.exit()
    simName = 'CDM'
    #simulatedClusters = ClusterSample(simName)

    #simulatedClusters.extractMergerHalos()

    #simulatedClusters.CalculateOffsetVectors(nClusters=5)

    bins = np.linspace(0,1000,100)
    
    totalSGdistribution = [ i.dist_sg[0] for i in listOfMergerHalos]
    
    proposedClusterRadius = getProposedClusterRadii()
    y,x,p = plt.hist( totalSGdistribution, bins=bins, density=True)
    plt.hist( proposedClusterRadius, bins=bins,density=True)
    plt.plot( [np.median(proposedClusterRadius),np.median(proposedClusterRadius)],[0.,np.max(y)],'-')

    plt.show()
Example #28
    def test_arrays_replicated(self):

        s = idlsave.read(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)

        # Check column types
        assert_true(s.arrays_rep.a.dtype.type is np.object_)
        assert_true(s.arrays_rep.b.dtype.type is np.object_)
        assert_true(s.arrays_rep.c.dtype.type is np.object_)
        assert_true(s.arrays_rep.d.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (5, ))
        assert_equal(s.arrays_rep.b.shape, (5, ))
        assert_equal(s.arrays_rep.c.shape, (5, ))
        assert_equal(s.arrays_rep.d.shape, (5, ))

        # Check values
        for i in range(5):
            assert_array_identical(s.arrays_rep.a[i], np.array([1, 2, 3], dtype=np.int16))
            assert_array_identical(s.arrays_rep.b[i], np.array([4., 5., 6., 7.], dtype=np.float32))
            assert_array_identical(s.arrays_rep.c[i], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
            assert_array_identical(s.arrays_rep.d[i], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=object))
Example #29
 def test_pointers_replicated_3d(self):
     s = idlsave.read(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
     assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_))
     assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_))
     assert_true(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
Example #30
from pylab import *
import numpy, scipy, matplotlib, pyfits
import idlsave
import itertools
import scipy.ndimage
from astropy.nddata.convolution import make_kernel, convolve

data_w49_1 = {
    k: idlsave.read('w49/130819_ob5_band%ii_clean_music_20130815_map.sav' % k)
    for k in range(4)
}
data_w49_2 = {
    k: idlsave.read('w49/130819_ob6_band%ii_clean_music_20130815_map.sav' % k)
    for k in range(4)
}
data_w51_1 = {
    k: idlsave.read(
        'w51_bgps/130819_ob1_band%ii_clean_music_20130815_map.sav' % k)
    for k in range(4)
}
data_w51_2 = {
    k: idlsave.read(
        'w51_bgps/130819_ob2_band%ii_clean_music_20130815_map.sav' % k)
    for k in range(4)
}
data_sgrb2_1 = {
    k:
    idlsave.read('sgr_b2/130819_ob3_band%ii_clean_music_20130815_map.sav' % k)
    for k in range(4)
}
data_sgrb2_2 = {
Example #31
 def test_scalars(self):
     s = idlsave.read(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
     assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_))
     assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_))
     assert_true(id(s.pointers.g[0]) == id(s.pointers.h[0]))
Example #32
def get_that_data(cube):
    from astropy.io import fits
    import idlsave
    import numpy as np

    data = idlsave.read(f'{cube}')
    print('Data headers: ', data.keys())
    header = input("Which header? ")
    head = data[f'{header}']  # a recarray
    tabledata = head.table_data  # a ndarray
    print('Components: ', tabledata[0].pkey)

    # the different headers are organized differently:
    if header in ('emlwav', 'emlwaverr', 'emlsig', 'emlsigerr'):
        comp1 = tabledata[0][2][1][0][8]
        comp2 = tabledata[0][1][1][0][8]
        print('Emission lines: ', comp1.pkey)
        eline = input("Which line? ")
        comps = {'c2': comp2, 'c1': comp1}

    elif header in ('emlflx', 'emlflxerr'):
        ftot = tabledata[0][0][1][0][8]
        comp2 = tabledata[0][1][1][0][8]
        comp2pk = tabledata[0][4][1][0][8]
        comp1 = tabledata[0][5][1][0][8]
        comp1pk = tabledata[0][6][1][0][8]
        print('Emission lines: ', ftot.pkey)
        eline = input("Which line? ")
        comps = {
            'ftot': ftot,
            'c2': comp2,
            'c2pk': comp2pk,
            'c1': comp1,
            'c1pk': comp1pk
        }

    elif header == 'emlcvdf':
        fluxerr = tabledata[0][0][1][0][8]
        vel = tabledata[0][1]
        cumfluxnorm = tabledata[0][2][1][0][8]
        cumfluxnormerr = tabledata[0][3][1][0][8]
        flux = tabledata[0][5][1][0][8]
        print('Emission lines: ', flux.pkey)
        eline = input("Which line? ")
        comps = {
            'fluxerr': fluxerr,
            'flux': flux,
            'vel': vel,
            'cfn': cumfluxnorm,
            'cfnerr': cumfluxnormerr
        }
    else:
        print('Header does not exist')
        return

    # loop through each component in the header:
    for c in comps:
        for key in range(len(comps[c].pkey)):
            line = comps[c].pkey[key]
            if line is not None:
                line = line.decode('utf-8')
                if line == eline or eline == 'all':
                    print('Line: ', line)
                    data = comps[c].pvalue[key]
                    data[data > 1e90] = np.nan  # flag all bad data as nan
                    fits.writeto(f'{header}_{c}_{line}.fits', data)
Example #33
def read_file(filename):
    s = idlsave.read(filename)
    data = s.sme
    return data.name, data.mass, data.vmag, data.ra, data.dec, \
            data.radius_iso, data.teff
Example #34
            #Otherwise, create a cors directory for this RSR file in the save path (check
            # to see if one is there already).
            save_path = save_path_lvl0 + cors_file + '/'
            if size(glob.glob(save_path)) == 0: os.system('mkdir ' + save_path)

            #Now read in the index flag info. Since single rsr files can have both ingress
            # and egress, this file may contain either 1 or 2 lines to include ingress (0)
            # only, egress (1) only, or both (0 and 1).
            index_start, index_stop, geom = loadtxt(rsr_files[j]+\
             '/full_output/flag_indices.txt',skiprows=1,usecols=[0,1,2],unpack=True)
            #Convert these index arrays to integers (as they are read in as floats).
            index_start, index_end, geom = index_start.astype(int),index_stop.astype(int),\
             geom.astype(int)

            #Now read in the IDL data
            idl_data = idlsave.read(rsr_files[j] + '/full_output/output.sav',
                                    verbose=False)

            #Open the file in which to save the output data
            save_filename = '_'.join(rsr_file.split('.')) + '_freq_v01.csv'
            save_filename = save_filename.upper()
            save_file = open(save_path + save_filename, 'w')

            #Generate a string that comprises this line in the save file
            for k in range(size(idl_data.sfduyearout)):
                s = ''
                s += '%10.i' % idl_data.sfduyearout[k] + ','
                s += '%10.i' % idl_data.sfdudoyout[k] + ','
                s += '%20.3f' % idl_data.sfdusecout[k] + ','
                s += '%10.i' % idl_data.rftoifmhzout[k] + ','
                s += '%10.i' % idl_data.ddclomhzout[k] + ','
                s += '%20.12e' % idl_data.ncofreqout[k] + ','
Example #35
 def test_complex64(self):
     s = idlsave.read(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
     assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
Example #36
 def test_int16(self):
     s = idlsave.read(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
     assert_identical(s.i16s, np.int16(-23456))
Example #37
 def test_float32(self):
     s = idlsave.read(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
     assert_identical(s.f32, np.float32(-3.1234567e+37))
Example #38
ax1.minorticks_on()
ax1.set_title('Individual EDP',fontsize='x-large')

#For each, load in all of the EDP, parse the profile files, and plot
all_IDs = []
topsides = zeros(size(obs_list))
avg_topside, std_topside, rms_topside = zeros(size(obs_list)), zeros(size(obs_list)), \
	zeros(size(obs_list))
print('')
print('Mean          STD           RMS           Max(alt)')


for i in range(size(obs_list)):
	all_IDs += [obs_list[i].split('\\')[-1]]

	idl_data = idlsave.read(obs_list[i]+'\\'+flyby+'_'+all_IDs[i]+'_edp_pat.sav',verbose=0)

	alt = idl_data.occptradiusarraysav
	lat = idl_data.occptlatarraysav
	ed = idl_data.edpsav

	if representation == 'ellipsoidal':
		#Calculate saturn_R, the ellipsoidal representation of Saturn's surface, as a function of latitude
		saturn_R = a*b/(np.sqrt((b*np.cos(lat*np.pi/180.))**2.+(a*np.sin(lat*np.pi/180.))**2.))
		#Calculate occptradius - R
		r_minus_R = alt - saturn_R
		#Calculate the "representative" poinnt, which is closest to height of 2000 km (representative of
		#	ionospheric peak)
		rep_point_loc = np.where(abs(r_minus_R-2000) == np.min(abs(r_minus_R-2000)))[0]
		#Subtract off the height of representative point
		alt -= saturn_R[rep_point_loc]
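		# Sanity check on the ellipsoidal radius formula above: at lat = 0 it
		# reduces to a (the equatorial radius) and at lat = 90 to b (the polar radius).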
Example #39
 def __init__( self,  shotnumber,  params = "refQVW11383.sav",  withgpu = False):
     self.shot = shotnumber
     self.timestamp = []
     if ( not _is_pycuda): 
         self.withgpu =  False
     else:
         self.withgpu = withgpu
         
     self._ref_bands = ""
     qvwref = read( join(expanduser('.'), "params", params),  verbose=False)
     self._refname = params
     if "pq" in qvwref and "freq" in qvwref:
         ph0q = qvwref["pq"].p[0][:self.Nqv]
         freq = qvwref["freq"][:self.Nqv]
         self._ref_bands = self._ref_bands + "q"
     else:
         ph0q = array([])
         freq = array([])
         
     if "pv" in qvwref and "frev" in qvwref and "isv" in qvwref and ("ioff" in qvwref or "offset" in qvwref):
         ph0v = qvwref["pv"].p[0] [:self.Nqv]
         frev = qvwref["frev"][:self.Nqv]
         isv = qvwref['isv']
         ioff = qvwref['ioff'] if "ioff" in qvwref else qvwref["offset"]
         self._ref_bands = self._ref_bands + "v"
     else:
         ph0v = array([])
         frev = array([])
         isv = -1
         ioff = -1
         
     if "pw" in qvwref and "frew" in qvwref:
         ph0w = qvwref["pw"].p[0] 
         frew = qvwref["frew"]
         isw = qvwref['isw']
         ioffw = qvwref['ioff']
         self._ref_bands = self._ref_bands + "w"
     else:
         ph0w = array([])
         frew = array([])
         isw = -1
         ioffw = -1
         
     nHead = 24
     fSample = 100.0 #MHz
     period = 5000                 
     if frew.size > 3000:
         self.nHead = {'q':nHead, 'v':nHead, 'w':nHead*2}
         self.fSample = {'q':fSample, 'v':fSample, 'w':fSample*2}
         self.period = {'q':period, 'v':period, 'w':period*2}
         isw = isw*2 if isw >= 0 else -1
         ioffw = ioffw*2 if ioffw >= 0 else -1
         ph0w = ph0w[:self.Nw]
         frew = frew[:self.Nw]
     else:
         self.nHead = {'q':nHead, 'v':nHead, 'w':nHead}
         self.fSample = {'q':fSample, 'v':fSample, 'w':fSample}
         self.period = {'q':period, 'v':period, 'w':period}
         ph0w = ph0w[:self.Nqv]
         frew = frew[:self.Nqv]
     #ieq = qvwref['ieq']
     if "v" in self._ref_bands:
         l_tmp = nonzero( freq >= frev[isv])[0]
          ieq = l_tmp[0] if l_tmp.size > 0 else freq.size-1
     else:
         ieq = freq.size-1
     if "w" in self._ref_bands:
         l_tmp = nonzero( frev >= frew[isw])[0]
          iev = l_tmp[0] if l_tmp.size > 0 else frev.size-1
     else:
         iev = frev.size-1
     
     self.istart = {'q':0, 'v':isv, 'w':isw}
     self.iend = {'q':ieq, 'v':iev, 'w':frew.size-1}
     self.ioff = {'q':0, 'v':ioff, 'w':ioffw}
     self.ph0ref = {'q':ph0q, 'v':ph0v, 'w':ph0w}
     self.faxis = {'q':freq, 'v':frev, 'w':frew}
     self.nscale = { 'q':50, 'v':50, 'w':50 }
     self.window = { 'q':array([[]]), 'v':array([[]]), 'w':array([[]])}
     self._sigready = False
Example #40
import sys

import idlsave
from datetime import date, timedelta

file_path = 'gev_nar.sav'
sav_file = idlsave.read(file_path)

gv = sav_file.gv  # Flares
nar = sav_file.nar  # ARs

times = [int(entry[0]) for entry in gv]
print('Maximum time since beginning of day (in hundredths):', max(times))

print('Last GEV record:')
print(gv[-1])

print('Last NAR record:')
print(nar[-1])

day_val = -1  # Number of days of particular gev entry since December 31, 1978

print('C2.2 class flare with AR #2672:')
for entry in gv:
    # Two digit numbers are ASCII values of chars (that's just the way they are stored in the sav file)
    if int(entry[4][0]) == 67 and int(entry[4][1]) == 50 and int(
            entry[4][3]) == 50 and int(entry[7]) == 2672:
        print(entry)
        day_val = int(entry[1])
        break
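
# Helper sketch for the ASCII-coded class field (assuming entry[4] stores one
# ASCII code per character, as the test above relies on):
def decode_class(field):
    # e.g. codes [67, 50, 46, 50] -> 'C2.2'
    return ''.join(chr(int(c)) for c in field)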
Example #41
def MakeTxtFile(in_file=None):
    """
    Purpose:
        To read in an IDL.sav file, filter for only the data of interest, and put the important info into a text file,
        The text file can then be read quickly by other analysis programs.
    Pre-conditions:
        :param: in_file: the IDL .sav file that you would like to create a text file from
        If no in_file is given, then Sept 5, 2016 is selected by default.  The given IDL file must be in the
        Data folder of this project
    Post-conditions:
        A text file is created in the same location as the IDL .sav file was read in from.
        The created file has the same name except with the time it was filtered for attached
        Currnet form of output text file is:

        Gate    Ut decimal time     frequency       velocity
        xxx     xxx                 xx              xx
        xxx     xxx                 xx              xx

    Return:
        none
    """
    START_UT = 1.50
    END_UT = 8.00
    PWR_MIN = 3     # minimum power in dB
    N_DECIMAL_PLACES = 5    # number of decimal points to keep when printing to txt file

    if not in_file:
        in_file = "20160905rkn"

    # Read in data from IDL .sav FILE
    print("Reading in IDL.sav file...")
    s = idlsave.read("Data/" + in_file + ".sav")

    # fit data is read in multidimensional arrays of size # of measurements by gates
    # i.e. vel[measurement_idx][gate_idx]

    # prm data is all different
    # Pick out everything that is important from prm
    print("Getting prm data...")
    time = makeTimeArray(s.prm['time'])
    freq = s.prm['tfreq']/1000.
    gate = np.arange(0, 75, 1)

    # duplicate the prm things so they will line up with the flattened things from fit
    NumOfMeasurements = len(s.fit['v'])
    print("# of measurements: " + str(NumOfMeasurements))

    time = np.repeat(time, len(gate))
    freq = np.repeat(freq, len(gate))
    gate = np.tile(gate, NumOfMeasurements)

    # Pick out the important things from fit and flatten them in row-major (C-style) order
    print("Getting and flattening fit data...")
    qflg = flattenFitArrays(s.fit['qflg'])
    gflg = flattenFitArrays(s.fit['gflg'])
    vel = flattenFitArrays(s.fit['v'])
    pwr = flattenFitArrays(s.fit['pwr0'])

    # keep only points in the UT window with good quality, no ground-scatter flag, and sufficient power
    valid = (time >= START_UT) & (time <= END_UT) & (qflg == 1) & (gflg == 0) & (pwr >= PWR_MIN)

    # Write to text file
    print("Writing data to file...")
    OUT_FILE = in_file + "_" + str(START_UT) + "-" + str(END_UT) + "UT"
    full_file_name = "Data/" + OUT_FILE + ".txt"
    with open(full_file_name, "w+") as f:
        writer = csv.writer(f, delimiter='\t')
        writer.writerows(zip(gate[valid], np.round(time[valid], N_DECIMAL_PLACES), freq[valid],
                             np.round(vel[valid], N_DECIMAL_PLACES)))

    # remove blank lines from file
    print("Cleaning up file...")
    with open(full_file_name) as filehandle:
        lines = filehandle.readlines()
    with open(full_file_name, 'w') as filehandle:
        lines = filter(lambda x: x.strip(), lines)
        filehandle.writelines(lines)

    print("Program Complete")
Example #42
import numpy as np
import idlsave

data = idlsave.read("./Bethermin2012model_grids.save")

# band passes for each experiment
band = data.get('band')
#array(['MIPS24', 'PACS70', 'PACS100', 'PACS160', 'SPIRE250', 'SPIRE350',
#       'Planck857GHz', 'SPIRE500', 'Planck545GHz', 'SCUBA850',
#       'Planck353GHz', 'AzTEC', 'Planck217GHz', 'Planck143GHz',
#       'Planck100GHz', 'Radio'], dtype=object)

# corresponding central frequency
Nu = data.get('lambda') * 1.e-6  # wavelength, converted from microns to meters
Nu = 3.e8 / Nu  # frequency in Hz

# counts per unit flux and redshift shell, for each frequency band
z = data.get('z')
Snu = data.get('snu')  # in Jy
dNdSnudz = data.get(
    'dndsnudz'
)  # counts of all sources (in gal/Jy/sr, Nband * Nredshift * Nflux array)

# remove nans
dNdSnudz = np.nan_to_num(dNdSnudz)

# save to separate files
np.savetxt("./converted/z.txt", z)
np.savetxt("./converted/Snu.txt", Snu)
np.savetxt("./converted/dNdSnudz_Planck100GHz.txt", dNdSnudz[14, :, :])
np.savetxt("./converted/dNdSnudz_Planck143GHz.txt", dNdSnudz[13, :, :])
Example #43
SHOWFIG = True

# We set a large possible set of periodicities
PeriodMinMax = [5, 20]
makedirs(saveFolder, exist_ok=True)

# IN SITU DATA
df_is = pd.read_csv(f"{dataFolder}small_ch_in_situ.csv")
df_is.index = pd.to_datetime(df_is["Time"])
del df_is["Time"]

insituParams = ["Vr", "Mf", "Np", "T", "Br"]
df_is = df_is[insituParams]

# REMOTE DATA
rs_171 = idlsave.read(f'{dataFolder}small_ch_171_lc_in.sav', verbose=False)
rs_193 = idlsave.read(f'{dataFolder}small_ch_193_lc_in.sav', verbose=False)
ch_flux = idlsave.read(f'{dataFolder}chflux.sav', verbose=False)

# 171 and 193 observations
time_array = rs_171.date_obs_171.copy()
time_array = [t.decode() for t in list(time_array)]

df_171 = pd.DataFrame(
    {
        'plume': rs_171.lc_171_plume_in,
        'cbpoint': rs_171.lc_171_bp_in,
        'chplume': rs_171.lc_171_ch_plume_in,
        'chole': rs_171.lc_171_ch_in,
        'qsun': rs_171.lc_171_qs_in,
    },
Example #44
 def test_complex32(self):
     s = idlsave.read(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
     assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))
Example #45
groupvmin = groupvel - 1000
groupvmax = groupvel + 1000
groupvmin = groupvel - 3. * groupsigma
groupvmax = groupvel + 3. * groupsigma
#calculate vcenter, vmin and vmax for each group according to the filter we will observe it through
for i in range(len(groupfiltcenter)):
    groupfiltcenter[i] = filtercenter[groupfilter[i]]
    groupfiltvcenter[i] = (groupfiltcenter[i] / 6563. - 1) * 3.e5
    groupfiltvmin[i] = (
        (groupfiltcenter[i] - 0.5 * filterwidth) / 6563. - 1) * 3.e5
    groupfiltvmax[i] = (
        (groupfiltcenter[i] + 0.5 * filterwidth) / 6563. - 1) * 3.e5

#change this to where your agc file is located
s = idlsave.read('/Users/rfinn/idl/programs/idl_alfa/agcnorth.sav')
#      Dtype=[(('agcnumber', 'AGCNUMBER'), '|O8'), (('which', 'WHICH'), '|O8'), (('rah', 'RAH'), '|O8'), (('ram', 'RAM'), '|O8'), (('ras10', 'RAS10'), '|O8'), (('sign', 'SIGN'), '|O8'), (('decd', 'DECD'), '|O8'), (('decm', 'DECM'), '|O8'), (('decs', 'DECS'), '|O8'), (('a100', 'A100'), '|O8'), (('b100', 'B100'), '|O8'), (('mag10', 'MAG10'), '|O8'), (('inccode', 'INCCODE'), '|O8'), (('posang', 'POSANG'), '|O8'), (('description', 'DESCRIPTION'), '|O8'), (('bsteintype', 'BSTEINTYPE'), '|O8'), (('vopt', 'VOPT'), '|O8'), (('verr', 'VERR'), '|O8'), (('extrc3', 'EXTRC3'), '|O8'), (('extdirbe', 'EXTDIRBE'), '|O8'), (('vsource', 'VSOURCE'), '|O8'), (('ngcic', 'NGCIC'), '|O8'), (('flux100', 'FLUX100'), '|O8'), (('rms100', 'RMS100'), '|O8'), (('v21', 'V21'), '|O8'), (('width', 'WIDTH'), '|O8'), (('widtherr', 'WIDTHERR'), '|O8'), (('widthcode', 'WIDTHCODE'), '|O8'), (('telcode', 'TELCODE'), '|O8'), (('detcode', 'DETCODE'), '|O8'), (('hisource', 'HISOURCE'), '|O8'), (('statuscode', 'STATUSCODE'), '|O8'), (('snratio', 'SNRATIO'), '|O8'), (('ibandqual', 'IBANDQUAL'), '|O8'), (('ibandsrc', 'IBANDSRC'), '|O8'), (('irasflag', 'IRASFLAG'), '|O8'), (('icluster', 'ICLUSTER'), '|O8'), (('hidata', 'HIDATA'), '|O8'), (('iposition', 'IPOSITION'), '|O8'), (('ipalomar', 'IPALOMAR'), '|O8'), (('rc3flag', 'RC3FLAG'), '|O8'), (('irotcat', 'IROTCAT'), '|O8'), (('newstuff', 'NEWSTUFF'), '|O8')])

ra = 15. * (s.agcnorth.rah[0] + s.agcnorth.ram[0] / 60. +
            s.agcnorth.ras10[0] / 10. / 3600.)  #Convert to degrees
dec = s.agcnorth.decd[0] + s.agcnorth.decm[0] / 60. + s.agcnorth.decs[0] / 3600.
agcvopt = s.agcnorth.vopt[0]
agcflux100 = s.agcnorth.flux100[0]
agcmag10 = s.agcnorth.mag10[0]

dr = 5.  #radial search in degrees
#offset of the field center in degrees, if desired
deltadec = array([0., 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'f')
deltara = array([.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'f')

Example #46
import pdb
import os
import idlsave
import numpy as np

cors = ['0106', '0107', '0108']
cors_path = 'C:\\Users\\tambu\\Documents\\BU\\Science Projects\\SaturnEDP\\PD_MBP_files\\Output\\Saturn\\'
for k in range(len(cors)):
    path = cors_path + 'cors_' + cors[k] + '\\'
    sub_dirs = [x for x in os.listdir(path) if os.path.isdir(path + x)]
    for j in range(len(sub_dirs)):
        rsr_data_path = path + sub_dirs[j] + '\\full_output\\output.sav'
        idl_data = idlsave.read(rsr_data_path, verbose=0)
        sss = sub_dirs[j][0:3]
        tt = sub_dirs[j][3:5]
        aa = sub_dirs[j][5:7]
        yyyy = str(idl_data['sfduyearout'][0])
        ddd = str(idl_data['sfdudoyout'][0])
        if len(ddd) == 1:
            ddd = '00' + ddd
        elif len(ddd) == 2:
            ddd = '0' + ddd
        h = int(idl_data['sfdusecout'][0] / 3600)
        if h < 10:
            hh = '0' + str(h)
        else:
            hh = str(h)
        m = int(np.floor((idl_data['sfdusecout'][0] - (h * 3600)) / 60))
        if m < 10:
            mm = '0' + str(m)
        else:
Example #47
 def test_byte(self):
     s = idlsave.read(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
     assert_identical(s.i8u, np.uint8(234))
Example #48
    groupfiltcenter[i] = filtercenter[groupfilter[i]]
    groupfiltvcenter[i] = (groupfiltcenter[i] / 6563. - 1) * 3.e5
    groupfiltvmin[i] = (
        (groupfiltcenter[i] - 0.5 * filterwidth) / 6563. - 1) * 3.e5
    groupfiltvmax[i] = (
        (groupfiltcenter[i] + 0.5 * filterwidth) / 6563. - 1) * 3.e5

#Get current path so program can tell if this is being run on Becky or Rose's computer
mypath = os.getcwd()
if mypath.find('rfinn') > -1:
    print "Running on Rose's computer"
    agcfile = '/Users/rfinn/idl/programs/idl_alfa/agctotal.sav'
elif mypath.find('koopmanr') > -1:
    print "Running on Becky's computer"
    agcfile = '/Users/koopmanr/idl_alfa/agctotal.sav'
s = idlsave.read(agcfile)

#change this to where your agc file is located

#s=idlsave.read('/Users/koopmanr/idl_alfa/agctotal.sav')

#      Dtype=[(('agcnumber', 'AGCNUMBER'), '|O8'), (('which', 'WHICH'), '|O8'), (('rah', 'RAH'), '|O8'), (('ram', 'RAM'), '|O8'), (('ras10', 'RAS10'), '|O8'), (('sign', 'SIGN'), '|O8'), (('decd', 'DECD'), '|O8'), (('decm', 'DECM'), '|O8'), (('decs', 'DECS'), '|O8'), (('a100', 'A100'), '|O8'), (('b100', 'B100'), '|O8'), (('mag10', 'MAG10'), '|O8'), (('inccode', 'INCCODE'), '|O8'), (('posang', 'POSANG'), '|O8'), (('description', 'DESCRIPTION'), '|O8'), (('bsteintype', 'BSTEINTYPE'), '|O8'), (('vopt', 'VOPT'), '|O8'), (('verr', 'VERR'), '|O8'), (('extrc3', 'EXTRC3'), '|O8'), (('extdirbe', 'EXTDIRBE'), '|O8'), (('vsource', 'VSOURCE'), '|O8'), (('ngcic', 'NGCIC'), '|O8'), (('flux100', 'FLUX100'), '|O8'), (('rms100', 'RMS100'), '|O8'), (('v21', 'V21'), '|O8'), (('width', 'WIDTH'), '|O8'), (('widtherr', 'WIDTHERR'), '|O8'), (('widthcode', 'WIDTHCODE'), '|O8'), (('telcode', 'TELCODE'), '|O8'), (('detcode', 'DETCODE'), '|O8'), (('hisource', 'HISOURCE'), '|O8'), (('statuscode', 'STATUSCODE'), '|O8'), (('snratio', 'SNRATIO'), '|O8'), (('ibandqual', 'IBANDQUAL'), '|O8'), (('ibandsrc', 'IBANDSRC'), '|O8'), (('irasflag', 'IRASFLAG'), '|O8'), (('icluster', 'ICLUSTER'), '|O8'), (('hidata', 'HIDATA'), '|O8'), (('iposition', 'IPOSITION'), '|O8'), (('ipalomar', 'IPALOMAR'), '|O8'), (('rc3flag', 'RC3FLAG'), '|O8'), (('irotcat', 'IROTCAT'), '|O8'), (('newstuff', 'NEWSTUFF'), '|O8')])

ra = 15. * (s.agctotal.rah[0] + s.agctotal.ram[0] / 60. +
            s.agctotal.ras10[0] / 10. / 3600.)  #Convert to degrees
dec = s.agctotal.decd[0] + s.agctotal.decm[0] / 60. + s.agctotal.decs[0] / 3600.
agcvopt = s.agctotal.vopt[0]
agcflux100 = s.agctotal.flux100[0]
agcmag10 = s.agctotal.mag10[0]

dr = 4.  #radial search in degrees
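
#A quick arithmetic check of the RA conversion above: rah=12, ram=30, ras10=0
#gives 15.*(12 + 30/60.) = 187.5 degrees (ras10 holds tenths of seconds of RA,
#hence the extra /10. before /3600.)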
Example #49
 def test_int32(self):
     s = idlsave.read(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
     assert_identical(s.i32s, np.int32(-1234567890))
Example #50
import time
import datetime as D

import idlsave


def fromJulian(j):
    '''
    Converts Julian Date to human readable format

    :return: human readable date and time
    '''
    days = j - 2440587.5
    sec = days * 86400.0
    return time.gmtime(sec)
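
# Worked check: Julian Date 2440587.5 is the Unix epoch, so fromJulian(2440587.5)
# returns the time.struct_time for 1970-01-01 00:00:00 UTC.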


if __name__ == '__main__':
    data = idlsave.read('nes_all.dat')

    exclude_first = True
    legend = True

    c = 1
    #ran = [4,5,6,7]
    ran = [4, 5, 6, 9]

    for i in ran:
        if exclude_first:
            start = 1
            print('\nAll below fits exclude the first point shown in the plots as it was measured under different conditions')
        else:
            start = 0
Example #51
 def test_float64(self):
     s = idlsave.read(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
     assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
Example #52
def rerun_mocplot_amiy(username):
    #rerun_mocplot_amiy, 'bill'
    am.cd_amiy('/Users/' + username + '/mocassin-rw_changes/output')
    s = idlsave.read('mocplot_variables.sav')
    #mocplot_Clumpy(infile,paramfile)
    print('done plotting again!!!')
Example #53
 def test_bytes(self):
     s = idlsave.read(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
     assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
Example #54
from astropy.io import fits
import idlsave
import itertools
import scipy.ndimage
from astropy.nddata.convolution import make_kernel, convolve
import numpy as np
import sys
import os
sys.path.append(os.path.split(os.getcwd())[0])
from convolve_match_makefits import convolve_and_match

data_g010_1 = {
    k: idlsave.read(
        'gal_010.47+00.03/130820_ob3_band%ii_clean_music_20130815_map.sav' % k)
    for k in range(4)
}
data_g010_2 = {
    k: idlsave.read(
        'gal_010.47+00.03/130820_ob4_band%ii_clean_music_20130815_map.sav' % k)
    for k in range(4)
}
data_g000_1 = {
    k: idlsave.read(
        'gal_0.253+0.016/130820_ob1_band%ii_clean_music_20130815_map.sav' % k)
    for k in range(4)
}
data_g000_2 = {
    k: idlsave.read(
        'gal_0.253+0.016/130820_ob2_band%ii_clean_music_20130815_map.sav' % k)
    for k in range(4)
}
Example #55
    ax1.hist(x, bins=50, histtype="step", color="k", range=(0, 13.807108309208775))
    ax3 = P.subplot(224)
    ax3.tick_params(axis="x", labelbottom="off")
    ax3.tick_params(axis="y", labelleft="off")
    ax3.hist(y, bins=50, orientation="horizontal", histtype="step", color="k", range=(0, 3))
    P.subplots_adjust(wspace=0.05)
    P.subplots_adjust(hspace=0.05)
    cbar_ax = fig.add_axes([0.522, 0.51, 0.02, 0.39])
    cb = fig.colorbar(im, cax=cbar_ax)
    cb.solids.set_edgecolor("face")
    cb.set_label(r"predicted SFR $[M_{\odot} yr^{-1}]$", labelpad=20)
    return fig


# Load the filters in order to calculate fluxes in each bandpass
filters = idlsave.read("/Users/becky/Projects/Green-Valley-Project/Kevin_IDL/ugriz.sav")
fuvwave = filters.ugriz.fuvwave[0]
fuvtrans = filters.ugriz.fuvtrans[0]
nuvwave = filters.ugriz.nuvwave[0]
nuvtrans = filters.ugriz.nuvtrans[0]
uwave = filters.ugriz.uwave[0]
utrans = filters.ugriz.utrans[0]
gwave = filters.ugriz.gwave[0]
gtrans = filters.ugriz.gtrans[0]
rwave = filters.ugriz.rwave[0]
rtrans = filters.ugriz.rtrans[0]
iwave = filters.ugriz.iwave[0]
itrans = filters.ugriz.itrans[0]
zwave = filters.ugriz.zwave[0]
ztrans = filters.ugriz.ztrans[0]
vwave = filters.ugriz.vwave[0]
Example #56
flyby = 'S07'
obs_type = 'N'
obs = flyby+obs_type
sss = 's10'

#Set paths
root_path = 'C:\\Users\\tambu\\Documents\\BU\\Science Projects\\SaturnEDP\\PD_MBP_files\\'
edp_path = root_path+'EDP\\'

#Get all the applicable directories
obs_list = [edp_path+body+'\\'+flyby+'\\'+x for x in os.listdir(edp_path+body+'\\'+flyby+'\\') if (os.path.isdir(edp_path+body+'\\'+flyby+'\\'+x)) and (obs[-1] in x)]


#Output indn and aven files as csv.
for i in range(len(obs_list)):
    idl_data = idlsave.read(obs_list[i]+'\\'+flyby+'_'+obs_list[i].split('\\')[-1]+'_edp.sav',verbose=0)    
    if obs_type == 'N':
        aa = 'oi'
    elif obs_type == 'X':
        aa = 'oe'
    yyyy = str(idl_data['sfduyearoutxr'][0])
    ddd = str(idl_data['sfdudoyoutxr'][0])
    if len(ddd) == 1:
        ddd = '00'+ddd
    elif len(ddd) == 2:
        ddd = '0'+ddd
    
    hhmm =  str(idl_data['utcrxarraysav'][0]).split(':')[0][-2:] + str(idl_data['utcrxarraysav'][0]).split(':')[1]
    t = obs_list[i].split('\\')[-1].split('_')[0][-1].lower()
    bb = obs_list[i].split('\\')[-1].split('_')[1].lower()
    nn = obs_list[i].split('\\')[-1].split('_')[2]
Example #57
	def __init__(self, observations, output, border_x1=0, border_x2=0, border_y1=0, border_y2=0, same_as_training=0, network_path='network'):
		"""
		---------
		Keywords:
		---------
		
		observations: Input array of shape (n_times, nx, ny) where
						nx & ny: Image dimensions
						n_times: Number of consecutive timesteps
						n_inputs: Number of types of inputs
		
		output: Output array of dimensions (nx, ny, n_depths*n_comp) where
						nx & ny: Image dimensions
						n_depths: Number of optical/geometrical depths to infer
						n_comp: Number of components of the velocity vector to infer
		
		border: Number of pixels to crop from the image in each direction
						border_x1: Number of pixels to remove from the left of the image
						border_x2: Number of pixels to remove from the right of the image
						border_y1: Number of pixels to remove from the bottom of the image
						border_y2: Number of pixels to remove from the top of the image
						
		same_as_training: Set to 1 if using data from the same simulation as the one used for training.
							-> The inputs will be normalized using the same values as the inputs in the
								training set because the values are known.
								
		network: Provide path to the network weights and normalization values
		
		"""
		
		# Only allocate needed memory with Tensorflow
		config = tf.compat.v1.ConfigProto()
		config.gpu_options.allow_growth = True
		session = tf.compat.v1.Session(config=config)
		# ktf.set_session(session)
		
		# -----------------
		# Input properties:
		# -----------------
		# Read
		self.observations = observations
		n_timesteps, nx, ny = observations.shape
		# Number of types of inputs
		self.n_inputs = 1
		# Number of consecutive frames of a given input
		self.n_times = 2
		# Number of images to generate
		self.n_frames = n_timesteps - 1
		# Image dimensions
		self.border_x1 = border_x1
		self.border_x2 = border_x2
		self.border_y1 = border_y1
		self.border_y2 = border_y2
		self.nx = nx - self.border_x1 - self.border_x2
		self.ny = ny - self.border_y1 - self.border_y2
		
		# ------------------
		# Output properties:
		# ------------------
		# Filename
		self.output = output
		# Number of inferred depths
		self.n_depths = 1
		# Number of inferred velocity components
		self.n_comp = 2
		self.n_outputs = self.n_depths*self.n_comp
		
		# -----------------
		# Network properties:
		# -----------------
		# Load training weights
		self.network_path = network_path
		self.weights_filename = self.network_path+'/deepvel_weights.hdf5'
		self.n_filters = 64
		self.kernel_size = 3
		self.batch_size = 1
		
		# ------------------------
		# Training set properties:
		# ------------------------
		# Load simulation min/max/mean/median/stddev
		filename_idl = self.network_path+"/MURaM_AR_Properties.sav"
		s_idl = idlsave.read(filename_idl)
		
		self.ic1_min = s_idl.data_minmax.ic1_min[0]
		self.ic1_max = s_idl.data_minmax.ic1_max[0]
		self.ic1_mean = s_idl.data_minmax.ic1_mean[0]
		self.ic1_median = s_idl.data_minmax.ic1_median[0]
		self.ic1_stddev = s_idl.data_minmax.ic1_stddev[0]
		self.vx1_min = s_idl.data_minmax.vx1_min[0]
		self.vx1_max = s_idl.data_minmax.vx1_max[0]
		self.vx1_mean = s_idl.data_minmax.vx1_mean[0]
		self.vx1_median = s_idl.data_minmax.vx1_median[0]
		self.vx1_stddev = s_idl.data_minmax.vx1_stddev[0]
		self.vy1_min = s_idl.data_minmax.vy1_min[0]
		self.vy1_max = s_idl.data_minmax.vy1_max[0]
		self.vy1_mean = s_idl.data_minmax.vy1_mean[0]
		self.vy1_median = s_idl.data_minmax.vy1_median[0]
		self.vy1_stddev = s_idl.data_minmax.vy1_stddev[0]
		self.bz1_min = s_idl.data_minmax.bz1_min[0]
		self.bz1_max = s_idl.data_minmax.bz1_max[0]
		self.bz1_mean = s_idl.data_minmax.bz1_mean[0]
		self.bz1_median = s_idl.data_minmax.bz1_median[0]
		self.bz1_stddev = s_idl.data_minmax.bz1_stddev[0]
		
		# --------------------
		# Test set properties:
		# --------------------
		# Use same normalization values as for the training and validation sets
		self.same_as_training = same_as_training
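
		# Usage sketch (hypothetical class and variable names; only __init__ of a
		# larger DeepVel-style wrapper is shown above):
		#   dv = deepvel(observations=obs_cube,  # (n_times, nx, ny) array
		#                output='inferred_velocities', network_path='network')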
Example #58
import powerlaw
import math
import matplotlib.pyplot as plt
from matplotlib.pylab import plot
import numpy as np
import idlsave
import statistics as stats

# In[9]:

z = np.zeros((19,10000))

for p in range(0,19):
    file = 'zk'+str(10*p+10)+'mil.sav'
    s = idlsave.read(file)
    b = s['zk'+str(10*p+10)+'mil']
    for i in range(len(b)-1):
        a=np.array(b[i])
        for j in range(len(a[0])): 
            for k in range(len(a[1])):
                if 0.20<abs(a[j,k])<0.25:
                    z[p][i]=z[p][i]+1
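        # A vectorized equivalent of the two inner loops (sketch; matches the
        # loop bounds above when `a` is square):
        #   z[p][i] = np.count_nonzero((np.abs(a) > 0.20) & (np.abs(a) < 0.25))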
           
'''
np.concatenate((a, b), axis=None)

zff = z[0]/4096
plt.plot(zff[36449:39357])
plt.show()
mean(zff)
'''
Example #59
def main(plotSeparately=True, forceCreateCases=False, multiCPU=4):
    # Set the unsafe directory, save folder, and data folder
    unsafe_dir = "/home/diegodp/Documents/PhD/Paper_3/SolO_SDO_EUI/unsafe/"
    saveFolder = f"{unsafe_dir}ISSI/New_Method/"
    dataFolder = f"/home/diegodp/Documents/PhD/Paper_3/SolO_SDO_EUI/Scripts/ISSI/data/"

    # Parameters for showing FIG
    SHOWFIG = False

    # We set a large possible set of periodicities
    PeriodMinMax = [5, 30]
    makedirs(saveFolder, exist_ok=True)

    # IN SITU DATA
    df_is = pd.read_csv(f"{dataFolder}small_ch_in_situ.csv")
    df_is.index = pd.to_datetime(df_is["Time"])
    del df_is["Time"]

    insituParams = ["Vr", "Mf", "Np", "T", "Br"]
    df_is = df_is[insituParams]

    # Set up the dataframes with proper cadence, etc.
    # Attempt to read in dataframes
    try:
        df_171 = pd.read_csv(f"{dataFolder}small_ch_171_lc_in.csv",
                             index_col="Time")
        df_193 = pd.read_csv(f"{dataFolder}small_ch_193_lc_in.csv",
                             index_col="Time")
        df_flux = pd.read_csv(f"{dataFolder}ch_flux.csv", index_col="Time")
        print("Loaded csv successfully")

        for _df in (df_171, df_193, df_flux):
            _df.index = pd.to_datetime(_df.index)

    # If unable to load CSVs, generate them from base data
    except FileNotFoundError:
        # TODO: Make into function (make ISSI csv)
        # REMOTE DATA
        rs_171 = idlsave.read(f"{dataFolder}small_ch_171_lc_in.sav",
                              verbose=False)
        rs_193 = idlsave.read(f"{dataFolder}small_ch_193_lc_in.sav",
                              verbose=False)
        ch_flux = idlsave.read(f"{dataFolder}chflux.sav", verbose=False)

        # 171 and 193 observations
        time_array = rs_171.date_obs_171.copy()
        time_array = [t.decode() for t in list(time_array)]

        df_171 = pd.DataFrame(
            {
                "plume": rs_171.lc_171_plume_in,
                "cbpoint": rs_171.lc_171_bp_in,
                "chplume": rs_171.lc_171_ch_plume_in,
                "chole": rs_171.lc_171_ch_in,
                "qsun": rs_171.lc_171_qs_in,
            },
            index=pd.to_datetime(time_array),
        )

        df_193 = pd.DataFrame(
            {
                "plume": rs_193.lc_193_plume_in,
                "cbpoint": rs_193.lc_193_bp_in,
                "chplume": rs_193.lc_193_ch_plume_in,
                "chole": rs_193.lc_193_ch_in,
                "qsun": rs_193.lc_193_qs_in,
            },
            index=pd.to_datetime(time_array),
        )

        # Open and Bright point flux
        flux_time = ch_flux.hmitimes.copy()
        flux_time = [t.decode() for t in list(flux_time)]

        df_flux = pd.DataFrame(
            {
                "ch_open_flux": ch_flux.chofluxes,
                "ch_bpoint_flux": ch_flux.chbpfluxes,
            },
            index=pd.to_datetime(flux_time, format="%Y.%m.%d_%H:%M:%S_TAI"),
        )

        df_171.to_csv(f"{dataFolder}small_ch_171_lc_in.csv",
                      index_label="Time")
        df_193.to_csv(f"{dataFolder}small_ch_193_lc_in.csv",
                      index_label="Time")
        df_flux.to_csv(f"{dataFolder}ch_flux.csv", index_label="Time")

    # # Create test cases
    """
	Generate the cases for all possible SolO - SHORT times (every hour)
	"""
    AIACases = {
        "shortTimes": (datetime(2018, 10, 29,
                                16), datetime(2018, 10, 30, 23, 50)),
        "longTimes": (datetime(2018, 10, 31, 8), datetime(2018, 11, 2, 8)),
        "shortDuration": 3,
        "caseName": "SDO_AIA",
        "shortDisplacement": 3,
        "MarginHours": 24,
        "savePicklePath":
        "/home/diegodp/Documents/PhD/Paper_3/SolO_SDO_EUI/Scripts/ISSI/cases/AIAcases.pickle",
        "forceCreate": forceCreateCases,
    }

    # Get the cases and put them together with respective AIA observations in Dic
    cases = caseCreation(**AIACases)
    AIACase = namedtuple("AIACase", ["name", "df", "regions", "cases"])
    LongCase = namedtuple("LongCase", ["name", "df"])

    shortDFDic = [
        AIACase(171, df_171.copy(), df_171.columns, cases),
        AIACase(193, df_193.copy(), df_193.columns, cases),
    ]
    longDF = LongCase("PSP", df_is.copy())

    if plotSeparately:

        emdAndCompareCases(
            shortDFDic,
            longDF,
            saveFolder=saveFolder,
            PeriodMinMax=PeriodMinMax,
            showFig=SHOWFIG,
            detrendBoxWidth=200,
            corrThrPlotList=np.arange(0.65, 1, 0.05),
            multiCPU=multiCPU,
        )
    else:
        raise NotImplementedError("Still unable to plot all together")
Example #60
height = 0.6 * width * rat
fig = plt.figure(figsize=(cm2inch(width), cm2inch(height)))

count = 0
title = [
    r'$D_{353}$', r'$D_{353}^{\mathrm {b}}$', r'$\lambda_{-}$',
    r'$\lambda_{+}$', r'filaments $\lambda_{-}$', r'filaments $\lambda_{+}$'
]

fits1 = py.open('fits/Planck_dustmodel_353GHz_15arcmin_ns512.fits')
model = fits1[0].data

fil_model = hp.read_map('fits/input_dustmap_n2_nscale5.fits', 0)

m1 = idlsave.read('savesets/lambda_minus_plus.sav')

s = idlsave.read(
    'savesets/filaments_data_n2_nside_512_rotangle_15_cutpixel_20_datatype_filter.sav'
)
map1 = m1.lambda_minus
map2 = s.filament_map
mm7 = s.length_map

map2[np.where(mm7 < 2.1)] = 0.0

s2 = idlsave.read(
    'savesets/plus_filaments_data_n2_nside_512_rotangle_15_cutpixel_20_datatype_filter.sav'
)
map3 = m1.lambda_plus
map4 = s2.filament_map_plus