def z_split_max_likely(tbdata):
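    """Split the catalogue into high- and low-redshift halves and fit each source.

    A sketch of the flow inferred from the code below: edge sources are
    removed, z_split divides the table, get_luminosity_and_flux returns
    luminosities and normalised light curves, and every source is fitted with
    vari_funcs.maximum_likelihood (which appears to return [sig, sigerr]).
    Returns the luminosities and fit results for both halves plus the cleaned
    table.
    """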
    posvar = np.linspace(0, 2, 5000)
    ### Remove edges ###
    tbdata = vari_funcs.remove_edges(tbdata)

    ### Split by z ###
    tbhigh, tblow = z_split(tbdata)

    ### Get luminosity and flux ###
    tbhigh, highL, highfluxnorm, higherrnorm = get_luminosity_and_flux(tbhigh)
    tblow, lowL, lowfluxnorm, lowerrnorm = get_luminosity_and_flux(tblow)

    ### Get sig values ###
    numobs = np.shape(highfluxnorm)[0]
    meanflux = np.nanmean(highfluxnorm, axis=1)
    highout = np.array([
        vari_funcs.maximum_likelihood(highfluxnorm[n, :],
                                      higherrnorm[n, :],
                                      meanflux[n],
                                      posvar,
                                      n=n,
                                      printn=100) for n in range(numobs)
    ])

    numobs = np.shape(lowfluxnorm)[0]
    meanflux = np.nanmean(lowfluxnorm, axis=1)
    lowout = np.array([
        vari_funcs.maximum_likelihood(lowfluxnorm[n, :],
                                      lowerrnorm[n, :],
                                      meanflux[n],
                                      posvar,
                                      n=n,
                                      printn=100) for n in range(numobs)
    ])
    return highL, highout, lowL, lowout, tbdata
Example no. 2
def get_ensemble_sig(tbdata, sigtb, binedge, posvar, aper=5):
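    """Ensemble maximum-likelihood variability in bins of Eddington ratio.

    Sources are binned by their Eddington ratio using the lower bin edges in
    binedge; each bin's normalised light curves are concatenated into one long
    curve and fitted with vari_funcs.maximum_likelihood. Returns the
    normalised fluxes and errors, the per-bin sig and sigerr, the per-source
    Eddington ratios and the mean ratio in each bin.
    """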
    # Extract flux and error arrays
    fluxnorm, fluxerrnorm, tbdata = get_and_normalise_flux(tbdata,
                                                           sigtb,
                                                           aper=aper)

    ### Find luminosity distance ###
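    # Use the spectroscopic redshift where available; entries flagged with -1
    # (no z_spec) fall back to the photometric redshift z_p.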
    z = tbdata['z_spec']  #[mask]
    z[z == -1] = tbdata['z_p'][z == -1]
    DL = cosmo.luminosity_distance(z)
    DL = DL.to(u.cm)

    ### Calculate the luminosity ###
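    # L = 4 * pi * D_L^2 * F, with D_L converted to cm above; the luminosity
    # carries whatever units 'Full_flux' is stored in.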
    xrayF = tbdata['Full_flux']  #[chanmask]
    xrayL = xrayF * 4 * np.pi * (DL.value**2)

    ### Get stellar mass ###
    Mstar = tbdata['Mstar_z_p']

    ### get edd ratio ###
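    # Assumed reading of the constants: 1.26e31 is the Eddington luminosity per
    # solar mass in watts and M_BH is taken as ~0.1 Mstar; the factor of 10 on
    # xrayL below looks like a bolometric correction (L_bol ~ 10 L_X), making
    # eddrat the natural log of the Eddington ratio.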
    Ledd = 1.26e31 * (0.1 * Mstar)
    eddrat = np.log(10 * xrayL) - np.log(Ledd)
    #    eddrat = (10*xrayL)/(Ledd)

    ### Create dicts to save data into ###
    enflux = {}
    enfluxerr = {}
    eneddrat = {}
    size = len(binedge)  # one ensemble bin per lower bin edge
    sig = np.empty(size)
    sigerr = np.empty(size)
    meaneddrat = np.empty(size)
    for m, enmin in enumerate(binedge):
        ### Isolate data needed ###
        mask1 = eddrat >= enmin
        if m != size - 1:
            mask2 = eddrat < binedge[m + 1]
        else:
            mask2 = np.ones(len(mask1))

        enmask = mask1 * mask2.astype(bool)

        enflux[m] = fluxnorm[enmask]
        enfluxerr[m] = fluxerrnorm[enmask]
        eneddrat[m] = eddrat[enmask]

        ### Combine into one flux curve per bin ###
        enfluxcurve = np.ravel(enflux[m])
        enfluxcurveerr = np.ravel(enfluxerr[m])

        ### Find max likelihood sig of curve ###
        [sig[m],
         sigerr[m]] = vari_funcs.maximum_likelihood(enfluxcurve,
                                                    enfluxcurveerr, 1, posvar)

        ### find mean edd ratio ###
        meaneddrat[m] = np.nanmean(eneddrat[m])

    return fluxnorm, fluxerrnorm, sig, sigerr, eddrat, meaneddrat
def get_ensemble_sig(tbdata, sigtb, binedge, posvar, aper=5):
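    """Ensemble maximum-likelihood variability in bins of X-ray luminosity.

    Same structure as the Eddington-ratio version above, but sources are
    binned by xrayL (from 'Full_flux' and the luminosity distance) using the
    lower bin edges in binedge. Returns the normalised fluxes and errors, the
    per-bin sig and sigerr, the per-source X-ray luminosities and the mean
    luminosity in each bin.
    """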
    # Extract flux and error arrays
    flux = vari_funcs.flux_stacks(tbdata, aper)
    flux, tbdata = vari_funcs.noneg(flux, tbdata)
    flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper)
    
    ### Normalise ###
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
    
    ### Find luminosity distance ###
    z = tbdata['z_spec']#[mask]
    z[z==-1] = tbdata['z_p'][z==-1]
    DL = cosmo.luminosity_distance(z)
    DL = DL.to(u.cm)
    
    ### Calculate the luminosity ###
    xrayF = tbdata['Full_flux']#[chanmask]
    xrayL = xrayF*4*np.pi*(DL.value**2)
    
    ### Create dicts to save data into ###
    enflux = {}
    enfluxerr = {}
    enxrayL = {}
    size = len(binedge)  # one ensemble bin per lower bin edge
    sig = np.empty(size)
    sigerr = np.empty(size)
    meanxrayL = np.empty(size)
    for m, enmin in enumerate(binedge):
        ### Isolate data needed ###
        mask1 = xrayL >= enmin
        if m != size-1:
            mask2 = xrayL < binedge[m+1]
        else:
            mask2 = np.ones(len(mask1))
        
        enmask = mask1*mask2.astype(bool)
    
        enflux[m] = fluxnorm[enmask]
        enfluxerr[m] = fluxerrnorm[enmask]
        enxrayL[m] = xrayL[enmask]
        
        ### Combine into one flux curve per bin ###
        enfluxcurve = np.ravel(enflux[m])
        enfluxcurveerr = np.ravel(enfluxerr[m])
        
        
        ### Find max likelihood sig of curve ###
        [sig[m],sigerr[m]] = vari_funcs.maximum_likelihood(enfluxcurve, enfluxcurveerr, 1, posvar)
        
        ### find mean xrayL ###
        meanxrayL[m] = np.nanmean(enxrayL[m])
        
    return fluxnorm, fluxerrnorm, sig, sigerr, xrayL, meanxrayL
def run_max_likely(tbdata):
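    """Per-source maximum-likelihood variability for a single catalogue.

    Removes edge sources, gets luminosities and normalised light curves from
    get_luminosity_and_flux, then fits each source's curve with
    vari_funcs.maximum_likelihood. Returns the luminosities, the per-source
    [sig, sigerr] array and the cleaned table.
    """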
    posvar = np.linspace(0,2,5000)
    ### Remove edges ###
    tbdata = vari_funcs.remove_edges(tbdata)
    
    ### Get luminosity and flux ###
    tbdata, L, fluxnorm, fluxerrnorm= get_luminosity_and_flux(tbdata)
    
    ### Get sig values ###
    numobs = np.shape(fluxnorm)[0]
    meanflux = np.nanmean(fluxnorm, axis=1)
    out = np.array([vari_funcs.maximum_likelihood(fluxnorm[n,:], 
                                                  fluxerrnorm[n,:], meanflux[n], 
                                                  posvar, n=n, printn=100) for n in range(numobs)])
    return L, out, tbdata
Example no. 5
def get_ensemble_sig(tbdata, sigtb, binedge, posvar, aper=5):
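    """Ensemble maximum-likelihood variability in bins of redshift.

    Sources are binned by redshift (spectroscopic where available, photometric
    otherwise) using the lower bin edges in binedge; the last bin is capped at
    z < 4.5. Returns the normalised fluxes and errors, the per-bin sig and
    sigerr, the per-source redshifts and the mean redshift in each bin.
    """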
    # Extract flux and error arrays
    flux = vari_funcs.flux_stacks(tbdata, aper)
    flux, tbdata = vari_funcs.noneg(flux, tbdata)
    flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(
        sigtb, tbdata, aper)

    ### Normalise ###
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)

    ### Find z ###
    z = tbdata['z_spec']  #[mask]
    z[z == -1] = tbdata['z_p'][z == -1]

    ### Create dicts to save data into ###
    enflux = {}
    enfluxerr = {}
    enz = {}
    size = len(binedge)  # one ensemble bin per lower bin edge
    sig = np.empty(size)
    sigerr = np.empty(size)
    meanz = np.empty(size)
    for m, enmin in enumerate(binedge):
        ### Isolate data needed ###
        mask1 = z >= enmin
        if m != size - 1:
            mask2 = z < binedge[m + 1]
        else:
            mask2 = z < 4.5  #np.ones(len(mask1))

        enmask = mask1 * mask2.astype(bool)

        enflux[m] = fluxnorm[enmask]
        enfluxerr[m] = fluxerrnorm[enmask]
        enz[m] = z[enmask]

        ### Combine into one flux curve per bin ###
        enfluxcurve = np.ravel(enflux[m])
        enfluxcurveerr = np.ravel(enfluxerr[m])

        ### Find max likelihood sig of curve ###
        [sig[m],
         sigerr[m]] = vari_funcs.maximum_likelihood(enfluxcurve,
                                                    enfluxcurveerr, 1, posvar)

        ### find mean z ###
        meanz[m] = np.nanmean(enz[m])

    return fluxnorm, fluxerrnorm, sig, sigerr, z, meanz
Example no. 6
def get_ensemble_sig(enflux, enfluxerr, enL, posvar):
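    """Ensemble maximum-likelihood variability from pre-binned dictionaries.

    enflux, enfluxerr and enL are dicts keyed by bin index (0..size-1), as
    built in the later examples; each bin's curves are concatenated and fitted
    with vari_funcs.maximum_likelihood. Returns the per-bin sig, sigerr and
    mean luminosity.
    """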
    ### Create dicts to save data into ###
    size = len(enflux)
    sig = np.empty(size)
    sigerr = np.empty(size)
    meanL = np.empty(size)
    for m in enflux:
        ### Combine into one flux curve per bin ###
        enfluxcurve = np.ravel(enflux[m])
        enfluxcurveerr = np.ravel(enfluxerr[m])
        
        ### Find max likelihood sig of curve ###
        [sig[m],sigerr[m]] = vari_funcs.maximum_likelihood(enfluxcurve, enfluxcurveerr, 1, posvar)

        ### find mean L ###
        meanL[m] = np.nanmean(enL[m])
    
    return sig, sigerr, meanL
Example no. 7
def run_max_likely(tbdata):
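    """Per-source maximum-likelihood variability using the quadrant sigma table.

    Works from the aper=4 flux stacks: removes edges, drops negative fluxes,
    builds quadrant-based errors from the sigma table, normalises, and fits
    every source's light curve with vari_funcs.maximum_likelihood. Returns the
    per-source [sig, sigerr] array and the cleaned table.
    """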
    posvar = np.linspace(0,2,5000)
    ### Remove edges ###
    tbdata = vari_funcs.remove_edges(tbdata)
    
    sigtb = Table.read('sigma_tables/quad_epoch_sigma_table_extra_clean_no06_2arcsec.fits')

    ### Extract flux table and error table ###
    flux = vari_funcs.flux4_stacks(tbdata)
    flux, tbdata = vari_funcs.noneg(flux, tbdata)
#    tbdata = tbdata[np.nanmean(flux,axis=1)>1e4]
    flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper=4)
    
    ### Normalise ###
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
    
    ### Get sig values ###
    numobs = np.shape(fluxnorm)[0]
    meanflux = np.nanmean(fluxnorm, axis=1)
    out = np.array([vari_funcs.maximum_likelihood(fluxnorm[n,:], 
                                                  fluxerrnorm[n,:], meanflux[n], 
                                                  posvar, n=n, printn=100) for n in range(numobs)])
    return out, tbdata
Example no. 8
fulllow, fulllowL, fulllowfluxnorm, fulllowerrnorm = get_luminosity_and_flux(
    fulllow)
chanhigh, highchanL, highchanfluxnorm, highchanerrnorm = get_luminosity_and_flux(
    chanhigh)
chanlow, lowchanL, lowchanfluxnorm, lowchanerrnorm = get_luminosity_and_flux(
    chanlow)
xmmhigh, highxmmL, highxmmfluxnorm, highxmmerrnorm = get_luminosity_and_flux(
    xmmhigh, xmm=True)
xmmlow, lowxmmL, lowxmmfluxnorm, lowxmmerrnorm = get_luminosity_and_flux(
    xmmlow, xmm=True)

#%% get sig values
numobs = np.shape(highfluxnorm)[0]
meanflux = np.nanmean(highfluxnorm, axis=1)
highout = np.array([
    vari_funcs.maximum_likelihood(highfluxnorm[n, :], higherrnorm[n, :],
                                  meanflux[n], posvar) for n in range(numobs)
])

numobs = np.shape(lowfluxnorm)[0]
meanflux = np.nanmean(lowfluxnorm, axis=1)
lowout = np.array([
    vari_funcs.maximum_likelihood(lowfluxnorm[n, :], lowerrnorm[n, :],
                                  meanflux[n], posvar) for n in range(numobs)
])
numobs = np.shape(fullhighfluxnorm)[0]
meanflux = np.nanmean(fullhighfluxnorm, axis=1)
fullhighout = np.array([
    vari_funcs.maximum_likelihood(fullhighfluxnorm[n, :],
                                  fullhigherrnorm[n, :], meanflux[n], posvar)
    for n in range(numobs)
])
Example no. 9
    xenXrayL[m] = np.append(enXrayL[m], xmmenXrayL[m])

    ### Combine into one flux curve per bin ###
    #    enfluxcurve = np.ravel(enflux[m])
    #    enfluxcurveerr = np.ravel(enfluxerr[m])
    fenfluxcurve = np.ravel(fenflux[m])
    fenfluxcurveerr = np.ravel(fenfluxerr[m])
    #    xmmenfluxcurve = np.ravel(xmmenflux[m])
    #    xmmenfluxcurveerr = np.ravel(xmmenfluxerr[m])
    xenfluxcurve = np.ravel(xenflux[m])
    xenfluxcurveerr = np.ravel(xenfluxerr[m])

    ### Find max likelihood sig of curve ###
    #    [sig[m],sigerr[m]] = vari_funcs.maximum_likelihood(enfluxcurve, enfluxcurveerr, 1, posvar)
    [fsig[m],
     fsigerr[m]] = vari_funcs.maximum_likelihood(fenfluxcurve, fenfluxcurveerr,
                                                 1, posvar)
    #    [xmmsig[m],xmmsigerr[m]] = vari_funcs.maximum_likelihood(xmmenfluxcurve, xmmenfluxcurveerr, 1, posvar)
    [xsig[m],
     xsigerr[m]] = vari_funcs.maximum_likelihood(xenfluxcurve, xenfluxcurveerr,
                                                 1, posvar)

    ### find mean xrayL ###
    #    meanxrayL[m] = np.nanmean(enXrayL[m])
    fmeanxrayL[m] = np.nanmean(fenXrayL[m])
    #    xmmmeanxrayL[m] = np.nanmean(xmmenXrayL[m])
    xmeanxrayL[m] = np.nanmean(xenXrayL[m])

### Get non ensemble results ###
numobs = np.shape(chanfluxnorm)[0]
meanchan = np.nanmean(chanfluxnorm, axis=1)
chanout = np.array([
Example no. 10
                                                                 aper=5)

### Normalise ###
fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
chanfluxnorm, chanerrnorm = vari_funcs.normalise_flux_and_errors(
    chanflux, chanerr)
fullfluxnorm, fullerrnorm = vari_funcs.normalise_flux_and_errors(
    fullflux, fullerr)
#%% All points
posvar = np.linspace(0, 2, 5000)
#start = time.time()

numobs = np.shape(fluxnorm)[0]
meanflux = np.nanmean(fluxnorm, axis=1)
out = np.array([
    vari_funcs.maximum_likelihood(fluxnorm[n, :], fluxerrnorm[n, :],
                                  meanflux[n], posvar) for n in range(numobs)
])

numobs = np.shape(chanfluxnorm)[0]
meanchan = np.nanmean(chanfluxnorm, axis=1)
chanout = np.array([
    vari_funcs.maximum_likelihood(chanfluxnorm[n, :], chanerrnorm[n, :],
                                  meanchan[n], posvar) for n in range(numobs)
])

numobs = np.shape(fullfluxnorm)[0]
meanfull = np.nanmean(fullfluxnorm, axis=1)
fullout = np.array([
    vari_funcs.maximum_likelihood(fullfluxnorm[n, :], fullerrnorm[n, :],
                                  meanfull[n], posvar) for n in range(numobs)
])
Example no. 11
    fenflux[m] = fullfluxnorm[fenmask]
    fenfluxerr[m] = fullerrnorm[fenmask]
    fenz[m] = fullz[fenmask]

    ### Combine into one flux curve per bin ###
    enfluxcurve = np.ravel(enflux[m])
    enfluxcurveerr = np.ravel(enfluxerr[m])
    cenfluxcurve = np.ravel(cenflux[m])
    cenfluxcurveerr = np.ravel(cenfluxerr[m])
    fenfluxcurve = np.ravel(fenflux[m])
    fenfluxcurveerr = np.ravel(fenfluxerr[m])

    ### Find max likelihood sig of curve ###
    [sig[m],
     sigerr[m]] = vari_funcs.maximum_likelihood(enfluxcurve, enfluxcurveerr, 1,
                                                posvar)
    [csig[m],
     csigerr[m]] = vari_funcs.maximum_likelihood(cenfluxcurve, cenfluxcurveerr,
                                                 1, posvar)
    [fsig[m],
     fsigerr[m]] = vari_funcs.maximum_likelihood(fenfluxcurve, fenfluxcurveerr,
                                                 1, posvar)

    ### find mean z ###
    meanz[m] = np.nanmean(enz[m])
    cmeanz[m] = np.nanmean(cenz[m])
    fmeanz[m] = np.nanmean(fenz[m])

### Get non ensemble results ###
numobs = np.shape(fluxnorm)[0]
meanflux = np.nanmean(fluxnorm, axis=1)
Example no. 12
xenflux = {}
xenfluxerr = {}
xenXrayL = {}
for m, enmin in enumerate(binedge):
    xenflux[m] = np.vstack([enflux[m],xmmenflux[m]])
    xenfluxerr[m] = np.vstack([enfluxerr[m],xmmenfluxerr[m]])
    xenXrayL[m] = np.append(enXrayL[m],xmmenXrayL[m])

### Get ensemble maximum likelihood ###
xsig, xsigerr, xmeanxrayL = get_ensemble_sig(xenflux, xenfluxerr, xenXrayL, posvar)
fsig, fsigerr, fmeanxrayL = get_ensemble_sig(fenflux, fenfluxerr, fenXrayL, posvar)

### Get non ensemble results ###
numobs = np.shape(chanfluxnorm)[0]
meanchan = np.nanmean(chanfluxnorm, axis=1)
chanout = np.array([vari_funcs.maximum_likelihood(chanfluxnorm[n,:], chanerrnorm[n,:], meanchan[n], posvar) for n in range(numobs)])

numobs = np.shape(fullfluxnorm)[0]
meanfull = np.nanmean(fullfluxnorm, axis=1)
fullout = np.array([vari_funcs.maximum_likelihood(fullfluxnorm[n,:], fullerrnorm[n,:], meanfull[n], posvar) for n in range(numobs)])

numobs = np.shape(xmmfluxnorm)[0]
meanxmm = np.nanmean(xmmfluxnorm, axis=1)
xmmout = np.array([vari_funcs.maximum_likelihood(xmmfluxnorm[n,:], xmmerrnorm[n,:], meanxmm[n], posvar) for n in range(numobs)])

#%% Plot results ###
binupper = np.append(binedge[1:],np.max(sortedxrayL))
fxlow = fmeanxrayL-binedge
fxhigh = binupper - fmeanxrayL
xxlow = xmeanxrayL-binedge
xxhigh = binupper - xmeanxrayL
Example no. 13
#fullflux, fullerr, fullxray = vari_funcs.k_mag_flux.create_quad_error_array(sigtb, fullxray, aper=5)

### Normalise ###
fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
#chanfluxnorm, chanerrnorm = vari_funcs.normalise_flux_and_errors(chanflux, chanerr)
#fullfluxnorm, fullerrnorm = vari_funcs.normalise_flux_and_errors(fullflux, fullerr)
#%% All points
posvar = np.linspace(0, 2, 5000)
#start = time.time()

numobs = np.shape(fluxnorm)[0]
meanflux = np.nanmean(fluxnorm, axis=1)
out = np.array([
    vari_funcs.maximum_likelihood(fluxnorm[n, :],
                                  fluxerrnorm[n, :],
                                  meanflux[n],
                                  posvar,
                                  n=n,
                                  printn=1000) for n in range(numobs)
])

#numobs = np.shape(chanfluxnorm)[0]
#meanchan = np.nanmean(chanfluxnorm, axis=1)
#chanout = np.array([vari_funcs.maximum_likelihood(chanfluxnorm[n,:], chanerrnorm[n,:], meanchan[n], posvar) for n in range(numobs)])
#
numobs = np.shape(fullfluxnorm)[0]
meanfull = np.nanmean(fullfluxnorm, axis=1)
fullout = np.array([
    vari_funcs.maximum_likelihood(fullfluxnorm[n, :], fullerrnorm[n, :],
                                  meanfull[n], posvar) for n in range(numobs)
])
    
    fenflux[m] = fullfluxnorm[fenmask]
    fenfluxerr[m] = fullerrnorm[fenmask]
    fenz[m] = fullz[fenmask]
    
    ### Combine into one flux curve per bin ###
    enfluxcurve = np.ravel(enflux[m])
    enfluxcurveerr = np.ravel(enfluxerr[m])
    cenfluxcurve = np.ravel(cenflux[m])
    cenfluxcurveerr = np.ravel(cenfluxerr[m])
    fenfluxcurve = np.ravel(fenflux[m])
    fenfluxcurveerr = np.ravel(fenfluxerr[m])
    
    
    ### Find max likelihood sig of curve ###
    [sig[m],sigerr[m]] = vari_funcs.maximum_likelihood(enfluxcurve, enfluxcurveerr, 1, posvar)
    [csig[m],csigerr[m]] = vari_funcs.maximum_likelihood(cenfluxcurve, cenfluxcurveerr, 1, posvar)
    [fsig[m],fsigerr[m]] = vari_funcs.maximum_likelihood(fenfluxcurve, fenfluxcurveerr, 1, posvar)
    
    ### find mean z ###
    meanz[m] = np.nanmean(enz[m])
    cmeanz[m] = np.nanmean(cenz[m])
    fmeanz[m] = np.nanmean(fenz[m])

### Get non ensemble results ###
numobs = np.shape(fluxnorm)[0]
meanflux = np.nanmean(fluxnorm, axis=1)
out = np.array([vari_funcs.maximum_likelihood(fluxnorm[n,:], fluxerrnorm[n,:], meanflux[n], posvar) for n in range(numobs)])

numobs = np.shape(chanfluxnorm)[0]
meanchan = np.nanmean(chanfluxnorm, axis=1)
    if m != size - 1:
        mask2 = Mstar < binedge[m + 1]
    else:
        mask2 = np.ones(len(mask1))

    enmask = mask1 * mask2.astype(bool)
    tempdata = chandata[enmask]
    print(len(tempdata))
    #%% Plot the single sources colour coded by M_star bin ###
    chanfluxnorm, chanerrnorm, tempdata = get_and_normalise_flux(tempdata,
                                                                 sigtb5,
                                                                 aper=5)
    numobs = np.shape(chanfluxnorm)[0]
    meanchan = np.nanmean(chanfluxnorm, axis=1)
    chanout = np.array([
        vari_funcs.maximum_likelihood(chanfluxnorm[n, :], chanerrnorm[n, :],
                                      meanchan[n], posvar)
        for n in range(numobs)
    ])

    ### Find luminosity distance for chandra sources ###
    chanz = tempdata['z_spec']  #[chanmask]
    chanz[chanz == -1] = tempdata['z_p'][chanz == -1]
    chanDL = cosmo.luminosity_distance(chanz)
    chanDL = chanDL.to(u.cm)

    ### Calculate the luminosity ###
    xrayF = tempdata['Full_flux']  #[chanmask]
    xrayL = xrayF * 4 * np.pi * (chanDL.value**2)

    #    if enmin == binedge[-1]:
    #        plt.errorbar(xrayL, chanout[:,0],yerr=chanout[:,1],fmt='o',