Example #1
def overlapping(spec_or_bins_a, spec_or_bins_b):
    """Check if there is any overlap."""
    getbins = lambda sob: sob if type(sob) is np.ndarray else wbins(sob)
    wbinsa, wbinsb = map(getbins, [spec_or_bins_a, spec_or_bins_b])
    ainb0, ainb1 = [mnp.inranges(w, wbinsb, [0, 0]) for w in wbinsa.T]
    bina0, bina1 = [mnp.inranges(w, wbinsa, [0, 0]) for w in wbinsb.T]
    return np.any(ainb0 | ainb1) or np.any(bina0 | bina1)
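A minimal, self-contained sketch of the same overlap test using plain NumPy; the project-specific wbins and mnp.inranges helpers are replaced by an inline check, and the bin arrays are hypothetical:

import numpy as np

def edges_in_ranges(edges, ranges):
    # True for each edge that falls strictly inside any [start, stop] range
    return np.any((edges[:, None] > ranges[None, :, 0]) &
                  (edges[:, None] < ranges[None, :, 1]), axis=1)

bins_a = np.array([[1000.0, 1010.0], [1010.0, 1020.0]])
bins_b = np.array([[1015.0, 1030.0]])
overlap = (edges_in_ranges(bins_a.ravel(), bins_b).any()
           or edges_in_ranges(bins_b.ravel(), bins_a).any())
print(overlap)  # True: 1015-1020 is covered by both sets of bins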
Example #2
def specSnapshot(star,
                 inst,
                 trange,
                 wrange,
                 n=100,
                 ax=None,
                 vCen=None,
                 maxpts=500,
                 **kwargs):
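    """Plot a spectrum "snapshot" of the star's photon data from `inst` within time range
    `trange` and wavelength range `wrange`, smoothing over `n` photons per point; if `vCen`
    is given, the x-axis is converted to Doppler velocity about that line center."""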
    if ax is None: ax = plt.gca()
    ph, p = io.readphotons(star, inst)
    keep = mnp.inranges(p['time'], trange)
    p = p[keep]

    gtis = np.array([ph['gti'].data['start'], ph['gti'].data['stop']]).T
    gt = mnp.range_intersect([trange], gtis)
    dt = np.sum(gt[:, 1] - gt[:, 0])

    w0, w1, spec, err = sp.smooth_spec(p['wavelength'], p['epera'], n)
    spec, err = spec / dt, err / dt
    w = (w0 + w1) / 2.0

    keep, = np.nonzero(mnp.inranges(w, wrange))
    keep = np.insert(keep, [0, len(keep)], [keep[0] - 1, keep[-1] + 1])
    w, spec, err = w[keep], spec[keep], err[keep]

    if len(w) > maxpts:
        keep = un.downsample_even(w, maxpts)
        w, spec, err = w[keep], spec[keep], err[keep]

    if vCen is not None:
        velocify = lambda w: (w - vCen) / vCen * 3e5
        w = velocify(w)
        ax.set_xlabel('Doppler Velocity [km s$^{-1}$]')
        ax.set_xlim([velocify(wr) for wr in wrange])
    else:
        ax.set_xlabel(r'Wavelength [$\AA$]')
        ax.set_xlim(wrange)
    ax.set_ylabel(fluxlabel)

    return pu.errorpoly(w, spec, err, **kwargs)
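For reference, the Doppler conversion used above maps wavelength to velocity via v = (w - vCen) / vCen * c with c ≈ 3e5 km/s. A quick worked example with a hypothetical line center (Lyman alpha at 1215.67 Å):

vCen = 1215.67             # hypothetical line center [Angstrom]
w = 1216.0                 # observed wavelength [Angstrom]
v = (w - vCen) / vCen * 3e5
print(v)                   # ~81 km/s, i.e. redshifted relative to the line center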
Example #3
def stackspecs(specs, range, norm=True, offfac=1.0, xlim=None):
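    """Plot the given spectra stacked vertically with increasing offsets, normalizing each
    to its peak flux within `range` if `norm` is True, and labeling each with its 'STAR'
    metadata entry."""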
    ax = plt.gca()
    if xlim is not None: plt.xlim(xlim)
    off = 0.0
    for spec in specs:
        w = utils.wbins(spec)
        wmid = (w[:, 0] + w[:, 1]) / 2.0
        f = spec['flux']
        fac = np.max(f[mnp.inranges(wmid, range)])
        if norm:
            f /= fac
        f += off
        l = specplot(w, f)
        xlim = ax.get_xlim()
        mid = (xlim[0] + xlim[1]) / 2.0
        plt.text(mid,
                 off,
                 spec.meta['STAR'],
                 bbox=dict(fc='w', alpha=0.5, lw=0))
        if norm:
            off += offfac
        else:
            off += fac * offfac
    plt.ylim(0.0, off)
Example #4
def showFlareStats(star, inst, label, trange, dt=30.0, ax=None):
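    """Plot a `dt`-binned lightcurve of `star` within `trange`, highlighting the points of the
    largest (highest-PEW) flare in that range, marking the mean flux implied by the flare's
    energy and equivalent width, and annotating both quantities."""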
    if ax is None: ax = plt.gca()
    trange = np.array(trange)

    flareTbl, bands = io.readFlareTbl(star, inst, label)

    # make lightcurve
    groups = [range(len(bands))]
    curve = reduce.auto_curve(star,
                              inst,
                              bands,
                              dt,
                              appx=False,
                              groups=groups,
                              fluxed=True)
    t0, t1, flux, err = list(zip(*curve))[0]
    t = (t0 + t1) / 2.0

    # narrow lightcurve down to range of interest
    keep = (t1 > trange[0]) & (t0 < trange[1])
    t, flux, err = t[keep], flux[keep], err[keep]
    ymax = flux.max()

    # keep only largest flare in time range of interest
    keep = (flareTbl['start'] < trange[1]) & (flareTbl['stop'] > trange[0])
    flareTbl = flareTbl[keep]
    iflare = np.argmax(flareTbl['PEW'])
    flare = flareTbl[iflare]

    # reference time to start of flare
    tref = flare['start']
    t = t - tref
    flare['start'] -= tref
    flare['stop'] -= tref
    trange = trange - tref

    # plot lightcurve highlighting flare points
    flarepts = mnp.inranges(t, [flare['start'], flare['stop']])
    ax.errorbar(t[~flarepts],
                flux[~flarepts],
                err[~flarepts],
                fmt='ko',
                capsize=0)
    ax.errorbar(t[flarepts],
                flux[flarepts],
                err[flarepts],
                fmt='rd',
                capsize=0)

    # reverse-engineer mean flux
    luminosity = flare['energy'] / flare['PEW']
    dist = sc.quickval(rc.proppath, star, 'dist')
    mnflux = luminosity / 4 / np.pi / (dist * 3.08567758e18)**2
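    # e.g., for a hypothetical flare with energy 1e30 erg and PEW 100 s around a star
    # at d = 10 pc: luminosity = 1e30 / 100 = 1e28 erg/s and
    # mnflux = 1e28 / (4*pi*(10*3.08567758e18)**2) ~ 8.4e-13 erg s^-1 cm^-2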

    # plot mean flux
    ax.axhline(mnflux, color='gray', ls='--')
    tmid = np.mean(t[~flarepts])
    mnfluxstr = r'$\overline{F}$ = %.1e' % mnflux
    ax.text(tmid, mnflux + ymax / 20.0, mnfluxstr, ha='center')

    # # plot duration
    # dt = flare['stop'] - flare['start']
    # y = 1.05 * np.max(flux + err)
    # ax.annotate('', xy=(flare['start'], y), xytext=(flare['stop'], y), arrowprops=dict(arrowstyle='<->', color='r'))
    # # ax.arrow(flare['start'], y, dt, 0.0, length_includes_head=True)
    # # ax.arrow(flare['start'], y, dt, 0.0, length_includes_head=True, head_starts_at_zero=True)
    # ax.text(tmidFlare, y*1.02, '{:.0f} s'.format(dt), ha='center', color='r')

    # fill area under flare
    # tmidFlare = np.sum(t[flarepts]*flux[flarepts])/np.sum(flux[flarepts])
    lo = [mnflux] * np.sum(flarepts)
    ax.fill_between(t[flarepts], lo, flux[flarepts], color='r', alpha=0.3)
    y = 0.2 * np.max(flux)
    intlbl = 'equiv. width = {:.0f} s\nenergy = {:.1g} erg'.format(
        flare['PEW'], flare['energy'])
    ax.text(0.05,
            0.95,
            intlbl,
            ha='left',
            va='top',
            color='r',
            transform=ax.transAxes)

    # axes
    ax.set_xlabel('Time [s]')
    ax.set_ylabel(linefluxlabel)
Example #5
def spectrumMovieFrames(star,
                        inst,
                        band,
                        trange,
                        dt,
                        smoothfac,
                        axspec,
                        axcurve,
                        folder,
                        dpi=80,
                        velocityplot=False,
                        reftrange=None,
                        dryRun=False,
                        ylim=None):
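    """Save a series of PNG frames to `folder`, each showing the spectrum within `band` over a
    sliding time window of length dt*smoothfac (on `axspec`) alongside the band-integrated
    lightcurve with the current window highlighted (on `axcurve`)."""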
    ph, photons = io.readphotons(star, inst)
    band, trange = map(np.asarray, [band, trange])
    if reftrange is not None:
        reftrange = np.asarray(reftrange)

    fig = axcurve.get_figure()
    figwidth = fig.get_figwidth()
    axwidth = axcurve.get_position().width * figwidth
    axPix = axwidth * dpi

    def goodN(Nphotons):
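        # Poisson statistics: smoothing over n counts gives S/N of roughly sqrt(n) per point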
        n = 100  # SN of 10
        if Nphotons / n > axPix:  # don't need to sample more finely than the pixel scale
            n = Nphotons / axPix
        elif Nphotons / n < 20:  # want at least some resolution
            n = max(Nphotons / 20, 9)  # but no less than SN of 3ish
        return n

    # re-reference times to start of time range
    tref = trange[0]
    photons['time'] -= tref
    if reftrange is not None: reftrange -= tref
    trange -= tref

    if velocityplot:
        velocify = lambda w: (w - velocityplot) / velocityplot * 3e5
        vband = velocify(band)

    p = photons
    tkeep = [
        max(p['time'][0], -trange[1] * 0.5),
        min(p['time'][-1], trange[1] * 1.5)
    ]
    dw = band[1] - band[0]
    wkeep = [band[0] - 5 * dw, band[1] + 5 * dw]

    # get rid of superfluous counts
    keep = mnp.inranges(p['time'], tkeep) & mnp.inranges(
        p['wavelength'], wkeep)
    p = p[keep]

    ## make lightcurve and set up initial plot
    nlc = goodN(
        np.sum(
            mnp.inranges(p['wavelength'], band)
            & mnp.inranges(p['time'], trange)))
    t0, t1, lc, lcerr = sp.smooth_curve(p['time'],
                                        p['wavelength'],
                                        p['epera'],
                                        nlc,
                                        bands=[band],
                                        trange=tkeep)
    tlc = (t0 + t1) / 2.0
    axcurve.set_xlim(trange)
    axcurve.set_xlabel('Time [s]')
    axcurve.set_ylabel('Integrated Flux \n[erg cm$^{-2}$ s$^{-1}$]')
    inrange = mnp.inranges(tlc, trange)
    pu.errorpoly(tlc[inrange],
                 lc[inrange],
                 lcerr[inrange],
                 'k-',
                 ax=axcurve,
                 alpha=0.3,
                 ealpha=0.15)

    # make spectrum frames
    T = dt * smoothfac
    nframes = int(round((trange[1] - trange[0] - T) / dt))
    t1s = np.linspace(trange[0] + T, trange[1], nframes)
    t0s = t1s - T
    wList, specList, errList = [], [], []
    for t0, t1 in zip(t0s, t1s):
        inInterval = mnp.inranges(p['time'], [t0, t1])
        pt = p[inInterval]
        n = goodN(np.sum(mnp.inranges(pt['wavelength'], band)))
        w0, w1, spec, err = sp.smooth_spec(pt['wavelength'], pt['epera'], n,
                                           wkeep)
        wList.append((w0 + w1) / 2.0)
        specList.append(spec / T)
        errList.append(err / T)

    ## set up spectrum plot
    if velocityplot:
        axspec.set_xlabel('Doppler Velocity [km s$^{-1}$]')
        axspec.set_xlim(vband)
    else:
        axspec.set_xlabel(r'Wavelength [$\AA$]')
        axspec.set_xlim(band)
    axspec.set_ylabel(fluxlabel)
    if ylim is None:
        ymin = min([np.min(s - e) for s, e in zip(specList, errList)])
        ymax = max([np.max(s + e) for s, e in zip(specList, errList)])
        axspec.set_ylim(ymin, ymax)
    else:
        axspec.set_ylim(ylim)

    # compute and plot reference spectrum, if desired
    if reftrange is not None:
        gtis = np.array([ph['gti'].data['start'], ph['gti'].data['stop']
                         ]).T - tref
        gt = mnp.range_intersect([reftrange], gtis)
        Tref = np.sum(gt[:, 1] - gt[:, 0])
        keep = mnp.inranges(photons['time'], reftrange) & mnp.inranges(
            photons['wavelength'], wkeep)
        pt = photons[keep]
        nref = goodN(np.sum(mnp.inranges(pt['wavelength'], band)))
        w0, w1, spec, _ = sp.smooth_spec(pt['wavelength'], pt['epera'], nref,
                                         wkeep)
        w = (w0 + w1) / 2.0
        if velocityplot:
            w = velocify(w)
        spec = spec / Tref
        axspec.plot(w, spec, 'k-', alpha=0.3)

    # make folder to save frames in if it doesn't exist
    if not os.path.exists(folder):
        os.mkdir(folder)

    ## loop through frames
    if dryRun:
        nframes = dryRun
    for i in range(nframes):
        # plot time range on lightcurve
        span = axcurve.axvspan(t0s[i], t1s[i], color='k', alpha=0.2)
        inrange = mnp.inranges(tlc, [t0s[i], t1s[i]])
        linelc, polylc = pu.errorpoly(tlc[inrange],
                                      lc[inrange],
                                      lcerr[inrange],
                                      'k-',
                                      ax=axcurve,
                                      ealpha=0.3)

        # plot spectrum
        w, spec, err = wList[i], specList[i], errList[i]
        inrange = mnp.inranges(w, band)
        inrange = np.nonzero(inrange)[0]
        inrange = np.insert(inrange, [0, len(inrange)],
                            [inrange[0] - 1, inrange[-1] + 1])
        ww, ss, ee = w[inrange], spec[inrange], err[inrange]
        ss[[0, -1]] = np.interp(band, ww, spec[inrange])
        ee[[0, -1]] = np.interp(band, ww, err[inrange])
        ww[[0, -1]] = band
        if velocityplot:
            ww = velocify(ww)
        linespec, polyspec = pu.errorpoly(ww,
                                          ss,
                                          ee,
                                          'k-',
                                          ax=axspec,
                                          ealpha=0.2)

        # save frame
        path = os.path.join(folder, '{:04d}.png'.format(i))
        fig.savefig(path, dpi=dpi)

        # remove plots
        for obj in (span, linelc, polylc, linespec, polyspec):
            obj.remove()
Example #6
def lightcurveCompendium(stars='hosts',
                         figure=None,
                         flarecut=2.0,
                         flarelabel='SiIV',
                         dt=30.0,
                         colorful=False):
    """
    Create a compendium of lightcurves for the specified stars highlighting flares.
    """
    fig = plt.gcf() if figure is None else figure
    if colorful:
        colors = itertools.cycle(['b', 'g', 'r'])
    else:
        colors = itertools.cycle(['k'])
    inst = 'hst_cos_g130m'
    if stars == 'hosts':
        stars = filter(
            lambda s: len(db.findfiles('u', inst, 'corrtag_a', s)) >= 4,
            rc.observed)

    ## SET UP AXES
    ## -----------
    # setup axes with just bottom axis showing
    fig.set_facecolor('w')
    fig.add_axes((0.23, 0.14, 0.72, 0.84), frameon=False)
    ax = fig.axes[0]
    xax = ax.get_xaxis()
    xax.tick_bottom()
    yax = ax.get_yaxis()
    yax.set_visible(False)
    ax.set_clip_on(False)

    fntsz = mpl.rcParams['font.size']
    spacedata = (fntsz / 72.0 / fig.get_figwidth()) / 0.8 * 14000

    # common keywords to use in errorbar plot
    ekwds = dict(fmt='.', capsize=0.0)
    alphaNF = 0.1 if colorful else 0.4

    ## MAKE LIGHTCURVES
    ## ----------------
    bands = rc.flare_bands[inst]
    offset, offsets = 0.0, []
    for star, color in zip(stars, colors):
        # get flare info
        flares, bands = io.readFlareTbl(star, inst, flarelabel)

        curve = reduce.auto_curve(star,
                                  inst,
                                  dt=dt,
                                  bands=bands,
                                  groups=[range(len(bands))])
        t0, t1, cps, err = list(zip(*curve))[0]

        t = (t0 + t1) / 2.0

        # get rid of gaps
        s, j, _ = mnp.shorten_jumps(t, maxjump=10 * dt, newjump=30 * dt)

        # identify flare pts
        flares = flares[flares['PEWratio'] > flarecut]
        flare_ranges = np.array([flares['start'], flares['stop']]).T
        flarepts = mnp.inranges(t, flare_ranges)

        # normalize the data to median
        med = np.median(cps[~flarepts])
        y = cps / med
        y -= 1.0
        e = err / med

        # normalize data to max - min
        ymax = np.max(y)
        y /= ymax
        e /= ymax

        # cull negative outliers
        good = y > -ymax
        s, y, e, flarepts = [a[good] for a in [s, y, e, flarepts]]

        # offset data in y
        offset = offset - np.min(y - e)
        offsets.append(offset)
        yo = y.copy() + offset

        # plot data
        ax.errorbar(s[~flarepts],
                    yo[~flarepts],
                    e[~flarepts],
                    alpha=alphaNF,
                    color=color,
                    **ekwds)
        ax.axhline(offset, linestyle='--', color='k', alpha=0.5)
        flarecolor = 'r' if color == 'k' else color
        ax.errorbar(s[flarepts],
                    yo[flarepts],
                    e[flarepts],
                    color=flarecolor,
                    **ekwds)

        # make arrow
        xy = (0.0, offset)
        xytext = (0.0, offset + 1.1)
        arrowprops = dict(arrowstyle='<-', color=color)
        ax.annotate('', xy=xy, xytext=xytext, arrowprops=arrowprops)

        # label arrow
        ymaxstr = '{:4.1f}'.format(ymax + 1.0)
        ax.text(-0.5 * spacedata,
                offset,
                '1.0',
                va='bottom',
                ha='right',
                color=color,
                fontsize=fntsz * 0.8)
        ax.text(-0.5 * spacedata,
                offset + 1.1,
                ymaxstr,
                va='top',
                ha='right',
                color=color,
                fontsize=fntsz * 0.8)

        # label star
        starlbl = starprops['name tex'][star]
        ax.text(-3.0 * spacedata,
                offset + 0.5,
                starlbl,
                va='center',
                ha='right',
                color=color)

        # increase offset
        offset += np.max(y + e)
        offset += 0.3

    ax.autoscale(axis='both', tight=True)

    # add x axis line and label
    ax.axhline(0, color='k')
    ax.set_xlabel('Time, Observation Gaps Shortened (s)')
Example #7
def killnegatives(spectbl,
                  sep_insts=False,
                  quickndirty=False,
                  minSN=None,
                  res_limit=1.0):
    """
    Removes negative bins by summing with adjacent bins until there are no negative bins left. I.e. the resolution in
    negative areas is degraded until the flux is no longer negative.

    Parameters
    ----------
    spectbl

    Returns
    -------
    newtbl
        A new bare-bones spectbl that has bin edges, flux, and error
        WARNING: the obs date, instrument, etc. columns will all get set to default values, as will all of the
        metadata except for 'star'
    """
    # if not np.any(spectbl['flux'] < 0):
    #     return spectbl

    if sep_insts:
        return inst_by_inst(spectbl,
                            killnegatives,
                            sep_insts=False,
                            quickndirty=quickndirty,
                            minSN=minSN,
                            res_limit=res_limit)

    if hasgaps(spectbl):
        return gap_by_gap(spectbl,
                          killnegatives,
                          sep_insts=False,
                          quickndirty=quickndirty,
                          minSN=minSN,
                          res_limit=res_limit)

    w0, w1, f_dsty, e_dsty = [
        spectbl[s].copy() for s in ['w0', 'w1', 'flux', 'error']
    ]
    line_bands = np.vstack(rc.line_bands.values())
    line_bands = line_bands[np.argsort(line_bands[:, 0]), :]
    untouchable = mnp.inranges(w0, line_bands) | mnp.inranges(w1, line_bands)
    if minSN is None:
        untouchable = untouchable & (f_dsty >= 0)
    else:
        untouchable = untouchable & (f_dsty / e_dsty >= minSN)
    dw = w1 - w0
    f, e = f_dsty * dw, e_dsty * dw
    v = e**2

    # I had this kind of vectorized once, but ultimately I think it just made for terrible readability with little gain in speed
    # print "bad bins remaining:"
    while True:
        if minSN is None:
            n = np.sum(f < 0)
        else:
            n = np.sum(f / np.sqrt(v) < minSN)
        # print n
        if n == 0:
            break

        # find the worst offending point
        imin = np.argmin(f / np.sqrt(v)) if minSN is not None else np.argmin(f)

        # integrate bins progressively outward until it no longer offends
        i0, i1 = imin - 1, imin + 1
        w0bin, w1bin, fbin, vbin = w0[imin], w1[imin], f[imin], v[imin]
        side = 0
        while True:
            if minSN is None:
                if fbin >= 0:
                    break
            else:
                if fbin / np.sqrt(vbin) >= minSN:
                    break

            # check if we should stop integrating outward on either side
            stop_at_0 = i0 < 0 or untouchable[i0]
            stop_at_1 = i1 > len(f) - 1 or untouchable[i1]

            # if can't integrate further outward, then set fbin to 0 if it is still negative and break
            if stop_at_0 and stop_at_1:
                if fbin < 0:
                    fbin, vbin = 0, 0
                break

            # else incorporate the next bin
            if side == 0 and not stop_at_0:
                fbin += f[i0]
                vbin += v[i0]
                i0 -= 1
            elif not stop_at_1:
                fbin += f[i1]
                vbin += v[i1]
                i1 += 1

            # switch sides if possible
            if stop_at_0:
                side = 1
            elif stop_at_1:
                side = 0
            else:
                side = not side

        # replace the appropriate section of the vectors
        bad_block = slice(i0 + 1, i1)
        arrays = w0, w1, f, v, untouchable
        wrng = w1[i1 - 1] - w0[i0 + 1]
        if wrng > res_limit:
            w0in = np.arange(w0[i0 + 1], w1[i1 - 1], res_limit)
            w1in = np.append(w0in[1:], w1[i1 - 1])
            dw = w1in - w0in
            fin = fbin * dw / wrng
            vin = vbin * dw / wrng
            inserts = w0in, w1in, fin, vin, [False] * len(fin)
        else:
            inserts = w0[i0 + 1], w1[i1 - 1], fbin, vbin, False
        new_arrays = []
        for a, value in zip(arrays, inserts):
            a = np.delete(a, bad_block)
            a = np.insert(a, i0 + 1, value)
            new_arrays.append(a)
        w0, w1, f, v, untouchable = new_arrays

    # return a spectbl
    dw = w1 - w0
    f_dsty, e_dsty = f / dw, np.sqrt(v) / dw
    bins = np.array([w0, w1]).T
    newspec = rebin(spectbl, bins)
    newspec['flux'] = f_dsty * newspec['flux'].unit
    newspec['error'] = e_dsty * newspec['error'].unit
    return newspec
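To illustrate the core idea of killnegatives without the untouchable line regions, the minSN option, or the worst-offender-first ordering used above, here is a simplified, self-contained sketch that merges any negative bin with a neighbor until all integrated fluxes are non-negative; the bin values are hypothetical:

import numpy as np

def merge_negatives(edges, flux):
    """edges: (N+1,) bin edges; flux: (N,) integrated flux per bin."""
    edges, flux = list(edges), list(flux)
    i = 0
    while i < len(flux):
        if flux[i] >= 0:
            i += 1
        elif i > 0:
            # absorb this bin into its left neighbor and re-check the merged bin
            flux[i - 1] += flux[i]
            del flux[i], edges[i]
            i -= 1
        elif len(flux) > 1:
            # leftmost bin: absorb it into its right neighbor
            flux[i + 1] += flux[i]
            del flux[i], edges[i + 1]
        else:
            flux[i] = 0.0  # nothing left to merge with
            i += 1
    return np.array(edges), np.array(flux)

edges = np.arange(6.0)                       # five unit-width bins
flux = np.array([2.0, -1.5, 0.5, 3.0, -0.2])
print(merge_negatives(edges, flux))          # -> [0. 2. 3. 5.], [0.5 0.5 2.8]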