Example #1
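These snippets look like excerpts from a single analysis module, so they lean on module-level imports and globals that are not shown. A minimal sketch of the preamble they appear to assume (the i2p parser, the mconv convolution helper, and the line lists are project-specific and only indicated as comments):

import time
import numpy as np
import matplotlib.pyplot as plt
import pyfits
from matplotlib.backends.backend_pdf import PdfPages as PDF
# Also assumed but not reproduced here:
#   i2p    - project module providing parse_fitprofs()
#   mconv  - variable-width Gaussian convolution helper used in Example #3
#   rlist, llist, numlist, centlist - module-level wavelength windows, line
#       names, line counts, and rest-frame line centers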
def plot_fits(datafile, fitfile, fitp, output, numlines):

    hdu = pyfits.open(datafile)[0]
    data = hdu.data
    header = hdu.header
    
    cdelt = header['CDELT1']
    crpix = header['CRPIX1']
    crval = header['CRVAL1']
    
    print cdelt

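    # Build the wavelength axis from the linear dispersion keywords in the FITS
    # header (note this matches the standard wave = crval + (pix - crpix)*cdelt
    # only when CRPIX1 = 1, which is how these files are written in Example #3)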
    wave = (np.arange(data.shape[1]) + crpix-1)*cdelt + crval
    
    fits = pyfits.open(fitfile)[0].data
    
    d = i2p.parse_fitprofs(fitp,numlines)

    if numlines == 3:
        x = np.arange(rlist[1][0], rlist[1][1])
    else:
        x = np.arange(rlist[0][0], rlist[0][1])

    idx = np.where((wave >= x.min()) & (wave <= x.max()))[0]
    
    wave = wave[idx]
    data = data[:,idx]
    fits = fits[:,idx]

    pp = PDF(output)
    for ap in range(d[1].shape[0]):
        print ap+1
        y = np.zeros(wave.size)
        ax = plt.figure().add_subplot(111)
        ax.set_xlabel('Wavelength')
        ax.set_ylabel('Flux')
        ax.set_title('Ap {}'.format(ap+1))
        back = []
        for i in range(numlines):
            back.append(d[6][ap,i])
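            # Rebuild each fitted line as a Gaussian: d[1] holds the centers, d[2]
            # the widths (as FWHM, hence the 2.355 = 2*sqrt(2 ln 2) conversion to
            # sigma), and d[7] the peak amplitudes used for normalization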
            tmp = np.exp(-0.5*(wave - d[1][ap,i])**2/(d[2][ap,i]/2.355)**2)
            tmp *= d[7][ap,i]/np.max(tmp)
#            tmp *= d[4][ap,i]/np.sum(tmp)
            ax.plot(wave,tmp+back[-1],'r:')
            y += tmp
    
        if numlines == 3:
            ratio = d[7][ap,2]/d[7][ap,1]
            ax.text(0.8,0.8,'ratio = {:4.3f}'.format(ratio),transform=ax.transAxes)

        ax.plot(wave,y+np.mean(back),'b',alpha=0.7)
        ax.plot(wave,data[ap,:],'k')
        ax.plot(wave,fits[ap,:]+np.mean(back),'r')
        pp.savefig(ax.figure)
        
    pp.close()
    plt.close('all')

    return
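A usage sketch for plot_fits; the file names below are placeholders patterned after Example #4, and rlist is assumed to be set at module level:

# plot the H-alpha region fits for one pointing, one PDF page per aperture
plot_fits('P1_contsub.ms.fits',   # continuum-subtracted spectra (placeholder name)
          'P1_contsub.fit.fits',  # fitprof model spectra (placeholder name)
          'P1_Ha.fitp',           # fitprof parameter output (placeholder name)
          'P1_Ha_fits.pdf',       # output multipage PDF
          numlines=3)             # e.g. H-alpha plus the [NII] doublet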
Example #2
def get_results(output, threshold=3., filename=''):
    """Parse fitprof output and display results

    The line centers are taken from the output of fitprof. For each line specified in the module header, the average offset and standard deviation across all fibers in the IFU are computed. The output is a text file and a plot of accuracy and stochasticity as a function of wavelength.

    Parameters
    ----------
    output : str
        Name of the output text file. This file will contain the mean, offset, stddev, and number of rejected apertures.
    threshold : float, optional
        Threshold for the iterative sigma clipping of each line center across the IFU. The total number of rejected fibers will be recorded in the output file.
    filename : str, optional
        Identifier (e.g. the name of the data file) written as a comment into the header of the output file.

    Returns
    -------
    None
        The results are written to a text file and to a plot of the accuracy as a function of wavelength.
    
    Notes
    -----
    Right now the plot is hardcoded to be written to WLC.png

    """

    fig = plt.figure()
    acax = fig.add_subplot(211)
    acax.set_xticklabels([])
    acax.set_ylabel('Mismatch [AA]')
    
    stax = fig.add_subplot(212)
    stax.set_xlabel('Wavelength')
    stax.set_ylabel('IFU std')

    with open(output,'a') as f:
        f.write('# {}\n'.format(time.asctime()))
        f.write('# {}\n'.format(filename))

        for l, n, c in zip(llist,numlist,centlist):
            proffile = '{}.fitp'.format(l)
            d = i2p.parse_fitprofs(proffile,n)[1]
            mean = np.mean(d,axis=0)
            std = np.std(d,axis=0)
            rejidx = np.where(np.abs(d - mean) > std*threshold)
            i = 0
            while rejidx[0].size != 0:
                # count the fibers flagged in this pass so numrej includes every rejection
                i += rejidx[0].size
                d = np.delete(d,rejidx[0],axis=0)
                mean = np.mean(d,axis=0)
                std = np.std(d,axis=0)
                rejidx = np.where(np.abs(d - mean) > std*threshold)
                
            diff = mean - c
            outstr = '{:} ({:}):\n\t{:>7}: {:}\n\t{:>7}: {:}\n\t{:>7}: {:}\n\t{:>7}: {:}\n'.\
                     format(l,c,'numrej',i,'mean',mean,'diff',diff,'std',std)
            prtstr = ''
            for j in range(len(c)):
                prtstr += '{} {} {}\n'.format(c[j],diff[j],std[j])

            print prtstr
            f.write(outstr)
            acax.plot(c,diff,'.k')
            stax.plot(c,std,'.k')
    
        f.write('\n\n')

    fig.subplots_adjust(hspace=0.0001)
    acax.set_xlim(*stax.get_xlim())
    plt.savefig('WLC.png')

    return
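The rejection step above is an iterative sigma clip of each line center across the IFU. A self-contained sketch of that logic in plain NumPy (the function name and signature are illustrative only):

import numpy as np

def sigma_clip_columns(d, threshold=3.):
    """Iteratively drop rows that sit more than threshold*std from the column mean.

    d has shape (n_fibers, n_lines); returns the clipped array, the final column
    means and standard deviations, and the total number of rows rejected.
    """
    nrej = 0
    while True:
        mean = np.mean(d, axis=0)
        std = np.std(d, axis=0)
        bad = np.unique(np.where(np.abs(d - mean) > threshold*std)[0])
        if bad.size == 0:
            return d, mean, std, nrej
        nrej += bad.size
        d = np.delete(d, bad, axis=0)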
Example #3
def make_balmer_model(Hafitp, location, velocities, output, tauV_coeffs=[-1.06,3.78], #for ma11
                      dispdata = '/d/monk/eigenbrot/WIYN/14B-0456/anal/disp/GP_disp_batch_avg_int.fits'):
    
    #bc03 coeffs = [-0.91,4.15]

    # tauV = np.loadtxt(balmerD, usecols=(1,), unpack=True)
    tauV = np.poly1d(tauV_coeffs)
    sizes, z = np.loadtxt(location, usecols=(1,5), unpack=True)
    vels = np.loadtxt(velocities, usecols=(1,), unpack=True)
    numap = vels.size
    Ha_data = i2p.parse_fitprofs(Hafitp,3)
    Ha_flux = Ha_data[4][:,0]/1e17 #- 1e-16

    #It's been long enough I think I can just use these magic numbers
    wave = np.arange(2011)*2.1 + 3340.
    ##wave = np.arange(1428)*2.1 + 3800. #for comparing to contsub data
    m_wave = np.logspace(np.log10(3340), np.log10(7600), 5000)
    mpix = np.mean(np.diff(m_wave)/m_wave[1:]*3e5) #size of 1 model pixel in km/s
    balmer_flux = np.zeros((numap,m_wave.size))

    #Read in disp data
    disph = pyfits.open(dispdata)[0]
    disp_head = disph.header
    disp_data = disph.data
    disp_numwave = disp_data.shape[1]
    disp_wave = np.arange(disp_numwave)*disp_head['CDELT1'] + disp_head['CRVAL1']
    disp_arr = np.zeros((5,m_wave.size))
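    # Interpolate each fiber-size dispersion curve onto the model wavelength grid;
    # the /2.355 suggests the file stores FWHM, converted here to Gaussian sigma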
    for d in range(5):
        disp_arr[d,:] = np.interp(m_wave,disp_wave,disp_data[d,:])/2.355

    sized = {0.937: 0,
             1.406: 1,
             1.875: 2,
             2.344: 3,
             2.812: 4}

    cents = [6563, 4861, 4341, 4102, 3970]
    ratios = np.array([2.86, 1, 0.47, 0.25, 0.16])/2.86 #dividing here doesn't really matter
    
    for c, r in zip(cents, ratios):
        idx = np.argmin(np.abs(m_wave - c))
        balmer_flux[:,idx] = r
        
    final_model = np.zeros((numap,wave.size))
    for i in range(numap):
        print i
        wave_red = wave * (1 + vels[i]/3e5)
        # tauV is a poly1d here, so evaluate it at |z|; indexing it (a holdover
        # from the commented-out loadtxt version above) would only return a coefficient
        if np.isnan(tauV(np.abs(z[i]))):
            red = np.ones(m_wave.size)
        else:
            print 'z = {}; tau = {}'.format(z[i],tauV(np.abs(z[i])))
            red = np.exp(-1 * tauV(np.abs(z[i]))*(m_wave/5500.)**(-0.7))
        balmer_flux[i,:] *= red

        did = sized[sizes[i]]
        sigma_pix = disp_arr[did,:]/mpix
        balmer_flux[i,:] = mconv(balmer_flux[i,:],sigma_pix)

        # Haid = np.argmin(np.abs(m_wave - cents[0]))
        # mHa_peak = balmer_flux[i,Haid]
        Haid = np.where((m_wave > cents[0] - 30) & (m_wave < cents[0] + 30))[0]
        mHa_flux = np.sum(balmer_flux[i,Haid])
        print Ha_flux[i], mHa_flux
        balmer_flux[i,:] *= Ha_flux[i]/mHa_flux
        final_model[i,:] = np.interp(wave,m_wave,balmer_flux[i,:])
        final_model[i,:] = np.interp(wave,wave_red,final_model[i,:])

    final_hdu = pyfits.PrimaryHDU(final_model)
    final_hdu.header.update('CRPIX1',1)
    final_hdu.header.update('CRVAL1',3340.)
    final_hdu.header.update('CDELT1',2.1)
    
    final_hdu.writeto(output,clobber=True)
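The model above places delta-function Balmer lines at their rest wavelengths with case B ratios, attenuates them with tau(lambda) = tauV*(lambda/5500)**-0.7, smooths them to the instrumental resolution, and rescales each aperture so the model H-alpha flux matches the measured one. A compact sketch of just the attenuation-and-rescaling step inside the aperture loop (names are illustrative):

import numpy as np

def redden_and_rescale(model_flux, m_wave, tauV, measured_Ha, Ha_cent=6563., window=30.):
    """Apply a Charlot & Fall style attenuation curve and rescale to the measured
    H-alpha flux (mirrors the per-aperture steps in make_balmer_model)."""
    # attenuation: tau(lambda) = tauV * (lambda/5500 A)**-0.7
    flux = model_flux*np.exp(-1*tauV*(m_wave/5500.)**(-0.7))
    # rescale so the summed flux within +/- window of H-alpha matches the measurement
    idx = np.where((m_wave > Ha_cent - window) & (m_wave < Ha_cent + window))[0]
    return flux*measured_Ha/np.sum(flux[idx])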
Example #4
def get_results(pointing, output):
    """Parse fitprof output and display results

    The line centers are taken from the output of fitprof. For each line specified in the module header the average offset and stddev across all fibers in the IFU is computed. Output is a textfile and a plot of accuracy and stochasticity as a function of wavelength.

    Parameters
    ----------
    output : str
        Name of the output text file. This file will contain the mean, offset, stddev, and number of rejected apertures.
    threshold : float, optional
        Threshold value for iterative sigma clipping in mean across IFU. The total number of rejected fibers will be recorded in the output file.

    Returns
    -------
    None :
       The result is a text file containing the results and a plot containing the accuracy as a function of wavelength.
    
    Notes
    -----
    Right now the plot is hardcoded to be writting to WLC.png

    """
    print 'Consolidating measurements'
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_ylabel(r'$\tau_{\mathrm{V,balm}}$')
    ax.set_xlabel('Aperture number')
    ax.set_title(time.asctime())
    ax.set_ylim(-10,15)

    tHad = i2p.parse_fitprofs('P{}_Ha.fitp'.format(pointing),3)
    Haf = tHad[4][:,0]
    Hafe = tHad[5][:,0]
    tHBd = i2p.parse_fitprofs('P{}_HB.fitp'.format(pointing),1)
    HBf = tHBd[4][:,0]
    HBfe = tHBd[5][:,0]
    
    #Correct for error over/under-estimation
    datafile = 'P{}_contsub.ms.fits'.format(pointing)
    hdu = pyfits.open(datafile)[0]
    data = hdu.data
    header = hdu.header
    
    cdelt = header['CDELT1']
    crpix = header['CRPIX1']
    crval = header['CRVAL1']
    
    wave = (np.arange(data.shape[1]) + crpix-1)*cdelt + crval
    idx = np.where((wave > rlist[1][0]) & (wave < rlist[1][1]))[0]
    tflux = np.mean(data[:,idx],axis=1)
    ecorr = np.sqrt(0.289*tflux - 2.811)
    print tflux, ecorr
#    raw_input()
    # Hafe *= ecorr
    # HBfe *= ecorr

    #Correct for underfit [NII] line
    Hacorr = tHad[4][:,1] - tHad[4][:,2]/3.
    Haf += Hacorr

    ratio = Haf/HBf
    ratio_e = np.sqrt((Hafe/HBf)**2 + (HBfe*Haf/HBf**2)**2)

    # EBV = 1.97*np.log10(ratio/2.86)
    # Av = 4.05*EBV
    # tauV = Av/1.086
    # Av_e = 1.97*4.05*ratio_e/(1.086*ratio*np.log(10))
    
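    # Convert the Balmer decrement to a V-band optical depth: 2.86 is the case B
    # H-alpha/H-beta ratio, and the 4.84 prefactor follows from the Charlot and
    # Fall attenuation law cited in the output header below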
    tauV = 4.84*np.log(ratio/2.86)
    tauV_e = 4.84/ratio*ratio_e

    print 'Plotting'
    ax.errorbar(np.arange(ratio.size)+1,tauV,yerr=tauV_e,fmt='.')
    fig.savefig(output+'.png')

    with open(output+'.txt','w') as f:
        f.write('# Written on {}\n'.format(time.asctime()))
        f.write('# With Charlot and Fall extinction law\n')
        f.write('#\n#{:>3}{:>10}{:>10}\n\n'.format('Ap','Tau_V','TauV_err'))
        for i in range(tauV.size):
            f.write('{:4}{:10.2f}{:10.2f}\n'.format(i+1,tauV[i],tauV_e[i]))

    return Haf, Hafe, HBf, HBfe, ratio, ratio_e
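For reference, the decrement and error propagation used above, written out as a standalone sketch (standard first-order propagation for R = Ha/Hb and tau_V = 4.84*ln(R/2.86); the function name is illustrative):

import numpy as np

def balmer_tauV(Haf, Hafe, HBf, HBfe):
    """Balmer decrement and Charlot & Fall optical depth with propagated errors."""
    ratio = Haf/HBf
    # dR = sqrt((dHa/Hb)**2 + (Ha*dHb/Hb**2)**2)
    ratio_e = np.sqrt((Hafe/HBf)**2 + (HBfe*Haf/HBf**2)**2)
    tauV = 4.84*np.log(ratio/2.86)
    # d(tauV) = 4.84*dR/R
    tauV_e = 4.84*ratio_e/ratio
    return ratio, ratio_e, tauV, tauV_e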