Example #1
def flux_integral(spectbl, wa=None, wb=None, normed=False):
    """Compute integral of flux from spectbl values. Result will be in erg/s/cm2."""
    if normed:
        if 'normflux' not in spectbl.colnames:
            spectbl = add_normflux(spectbl)

    assert wa is None or wa >= spectbl['w0'][0]
    assert wb is None or wb <= spectbl['w1'][-1]

    # wa may also be an array of one or more (w0, w1) wavelength ranges;
    # integrate each range separately and combine the results
    if hasattr(wa, '__iter__'):
        rng = np.asarray(wa)
        if rng.size == 2:
            wa, wb = rng
        elif rng.size > 2:
            results = [
                flux_integral(spectbl, _rng, normed=normed) for _rng in rng
            ]
            fluxes, errs = zip(*results)
            return np.sum(fluxes), mnp.quadsum(errs)

    # trim the table to the requested wavelength limits
    if wa is not None:
        spectbl = split_exact(spectbl, wa, 'red')
    if wb is not None:
        spectbl = split_exact(spectbl, wb, 'blue')

    dw = spectbl['w1'] - spectbl['w0']

    if normed:
        return np.sum(spectbl['normflux'] * dw), mnp.quadsum(
            spectbl['normerr'] * dw)
    else:
        return np.sum(spectbl['flux'] * dw), mnp.quadsum(spectbl['error'] * dw)
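
As a quick cross-check on flux_integral, here is a minimal, self-contained sketch of the same bin-wise arithmetic: flux times bin width summed over bins, with the per-bin errors combined in quadrature (the role mnp.quadsum plays above). The array names mirror the spectbl columns ('w0', 'w1', 'flux', 'error'); the values are made up for illustration.

import numpy as np

# hypothetical bin edges, fluxes, and errors -- not real data
w0 = np.array([1300.0, 1301.0, 1302.0])
w1 = np.array([1301.0, 1302.0, 1303.0])
flux = np.array([2.0e-14, 3.0e-14, 2.5e-14])    # erg s-1 cm-2 AA-1
error = np.array([1.0e-15, 1.2e-15, 1.1e-15])

dw = w1 - w0
F = np.sum(flux * dw)                       # integrated flux, erg s-1 cm-2
F_err = np.sqrt(np.sum((error * dw)**2))    # quadrature sum, like mnp.quadsum
print(F, F_err)
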
Example #2
def fuv_cont_stats(star):
    """
    Get stats on FUV continuum flux:
        - avg flux
        - avg flux error
        - raito of FUV continuum to total flux in the FUV assuming flat continuum
        - error on ratio
    """
    pan = io.readpan(star)
    cont = utils.keepranges(pan, rc.contbands, ends='exact')
    dw = cont['w1'] - cont['w0']

    # assume flat continuum
    # Fcont_avg = np.sum(cont['flux'] * dw)/np.sum(dw)
    # Fcont_avg_err = mnp.quadsum(cont['error'] * dw)/np.sum(dw)
    # dw_fuv = rc.fuv[1] - rc.fuv[0]
    # Fall_FUV, Fall_FUV_err = utils.flux_integral(pan, *rc.fuv)
    # ratio = Fcont_avg * dw_fuv / Fall_FUV
    # ratio_err = abs(ratio)*np.sqrt((Fall_FUV_err/Fall_FUV)**2 + (Fcont_avg_err/Fcont_avg)**2)

    # just use the continuum as actually measured; ignore the "in-between" continuum
    Fcont = np.sum(cont['flux'] * dw)
    Fcont_err = mnp.quadsum(cont['error'] * dw)
    Fall_FUV, Fall_FUV_err = utils.flux_integral(pan, cont['w0'][0],
                                                 cont['w1'][-1])
    ratio = Fcont / Fall_FUV
    ratio_err = abs(ratio) * np.sqrt((Fall_FUV_err / Fall_FUV)**2 +
                                     (Fcont_err / Fcont)**2)
    return Fcont, Fcont_err, ratio, ratio_err
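
The ratio_err line above is standard quadrature propagation for a quotient, sigma_r = |r| * sqrt((sigma_a/a)**2 + (sigma_b/b)**2), which assumes the two errors are independent. A small standalone sketch with made-up numbers (the helper name is hypothetical):

import numpy as np

def ratio_with_error(a, a_err, b, b_err):
    # propagate errors for r = a / b in quadrature, as in fuv_cont_stats
    r = a / b
    r_err = abs(r) * np.sqrt((a_err / a)**2 + (b_err / b)**2)
    return r, r_err

# hypothetical integrated fluxes in erg s-1 cm-2
print(ratio_with_error(1.2e-14, 1.0e-15, 4.0e-14, 3.0e-15))
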
Example #3
def SEEFlareRatios(proximityCut=0.1):
    """Compute flare/quiescent flux ratios (and their errors) for the SEE
    emission lines and an FUV continuum band, using the TIMED/SEE L3A spectra
    and the SEE flare catalog. Returns two dicts keyed by line/continuum name:
    one of ratios, one of ratio errors."""
    ## get the data from the file
    L3Afile = path.join(rc.solarpath, 'u_tmd_see_-----_sun_spectra_L3A.ncdf')
    with nc.Dataset(L3Afile) as ncsee:
        data = ncsee.variables

        # obs times
        year = np.floor_divide(data['DATE'][0], 1000)
        day = data['DATE'][0] - year * 1000  # out of 365
        sec = data['TIME'][0]
        time = year + day / 365.0 + sec / 3600.0 / 24.0 / 365.0

        # spectra
        flux = data['SP_FLUX'][0] / 10.0  # W m-2 AA-1
        err = data['SP_ERR_MEAS'][0] * flux  # measurement precision (relative) -> absolute error

        # line data
        lineflux = data['LINE_FLUX'][0] / 10.0  # W m-2 AA-1
        lineerr = data['LINE_ERR_MEAS'][0] / 10.0 * lineflux
        linewaves = data['LINEWAVE'][0] * 10.0  # AA
        linenames = [''.join(n) for n in data['LINENAME'][0]]
        linenames = [
            '{} {:.0f}'.format(n, w)
            for n, w in zip(linenames, np.floor(linewaves))
        ]

    ## compute continuum fluxes
    # indices chosen just by looking at the data
    # I matched the continuum bands from the variability paper as closely as possible (these were 1340-1350,
    # 1372-1380.5, 1382.5-1389, and 1413-1424.5)
    contnames = ['continuum 1340-1420']
    contindices = [[134, 137, 138, 141]]
    contflux = [np.sum(flux[:, i], axis=1) * 10.0
                for i in contindices]  # W m-2
    conterr = [mnp.quadsum(err[:, i], axis=1) * 10.0
               for i in contindices]  # W m-2
    contflux, conterr = [np.array(a).T for a in [contflux, conterr]]

    ## open and parse data from SEE flare catalog
    flareCat = Table.read(
        path.join(rc.solarpath, 'u_tmd_see_-----_sun_flare_catalog.csv'))

    # exclude events where SEE obs is too far from peak
    startsec = flareCat['start hour'] * 3600 + flareCat['start min'] * 60
    stopsec = flareCat['stop hour'] * 3600 + flareCat['stop min'] * 60
    # some spill over to next day
    stopsec[stopsec < startsec] = stopsec[stopsec < startsec] + 3600 * 24.0
    duration = stopsec - startsec
    keep = flareCat['see lag'].astype('f') / duration < proximityCut
    slimCat = flareCat[keep]

    ## combine line and cont data for flares
    names = linenames + contnames
    fluxes = np.hstack([lineflux, contflux])
    errs = np.hstack([lineerr, conterr])

    ## find which see observations are closest to the peak of each flare
    # get time of flare peak in decimal years
    peakyear = (slimCat['year'] + slimCat['day of year'] / 365.0
                + slimCat['peak hour'] / 365.0 / 24.0
                + slimCat['peak min'] / 365.0 / 24.0 / 60.0)
    startyear = (slimCat['year'] + slimCat['day of year'] / 365.0
                 + slimCat['start hour'] / 365.0 / 24.0
                 + slimCat['start min'] / 365.0 / 24.0 / 60.0)

    # for each peak, find the closest point and save the flux values for each line
    iflares = []
    for peak in peakyear:
        lags = time - peak
        iflares.append(np.argmin(np.abs(lags)))

    # then find nearest quiescent point with good S/N before the flare and take ratio
    goodSN = fluxes / errs > 5.0
    ratioList, ratioerrList = [], []
    for i in range(len(names)):
        ratios, ratioerrs = [], []
        for start, iflare in zip(startyear, iflares):
            b4flare = time < start
            usable = b4flare & goodSN[:, i]
            if np.any(usable):
                iquiescent = np.max(np.nonzero(usable)[0])
                ratio = fluxes[iflare, i] / fluxes[iquiescent, i]
                ratios.append(ratio)
                error = ratio * np.sqrt(
                    (errs[iquiescent, i] / fluxes[iquiescent, i])**2 +
                    (errs[iflare, i] / fluxes[iflare, i])**2)
                ratioerrs.append(error)
            else:
                ratios.append(np.nan)
                ratioerrs.append(np.nan)
        ratioList.append(ratios)
        ratioerrList.append(ratioerrs)
    ratioList = list(map(np.array, ratioList))
    ratioerrList = list(map(np.array, ratioerrList))
    return dict(zip(names, ratioList)), dict(zip(names, ratioerrList))
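
The heart of the flare matching in SEEFlareRatios is a nearest-in-time lookup: each flare spectrum is the SEE observation closest to the flare peak, and the quiescent reference is the last good-S/N observation before the flare start. A minimal sketch of the nearest-in-time step with made-up decimal-year times:

import numpy as np

# hypothetical SEE observation times and flare peak times (decimal years)
time = np.array([2003.10, 2003.13, 2003.15, 2003.20])
peakyear = np.array([2003.12, 2003.19])

# index of the observation closest in time to each flare peak
iflares = [int(np.argmin(np.abs(time - peak))) for peak in peakyear]
print(iflares)  # -> [1, 3]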