def sourceangles(data, met, dur=1.024, slist=['Sun'] + strongsources, showbg=False):
    import plots
    plots.importmpl()
    plt = plots.plt
    poshist = data['poshist']['GLAST POS HIST'].data
    tnear = np.linspace(met-10*dur, met+10*dur, 61)
    # (s2cnear, c2snear) = sctransformations(poshist, tnear)
    # earthnearxyz = earthposition(poshist, tnear)  # earth position in xyz spacecraft coordinates
    earthnearxyz = earthpositioncel(poshist, tnear) # earth position in xyz celestial coordinates
    plt.fill_between(tnear - met, 0, 69, color='0.80')
    occdict['Sun'] = sunposition(met) # add sun
    for x in slist:
        xnearxyz = pt2xyz(occdict[x])[:,np.newaxis]
        angles = np.arccos(np.sum(xnearxyz * earthnearxyz, axis=0)) * 180./np.pi
        plt.plot(tnear - met, angles, label=x, linewidth=2.0)
    plt.ylim(60, 80.)
    if len(slist) > 5:
        plt.setp(plt.gcf(), figwidth=9, figheight=6)
        plt.gca().set_position([0.1, 0.1, 0.6, 0.8])
        # put a legend to the right of the current axis
        plt.legend(loc='center left', bbox_to_anchor=(1.1, 0.5), numpoints=1, prop={'size':10})
    else:
        plt.legend()
    if showbg:
        data = loaddailydata(fermi2utc(met))
        plt.twinx()
        for d in nlist:
            specdata = data[d]['SPECTRUM'].data # look at data for detector
            tcent = (specdata['TIME'] + specdata['ENDTIME']) / 2 - met # relative mean time
            idx = np.nonzero((specdata['QUALITY'] == 0) & (specdata['EXPOSURE'] > 0) & (np.abs(tcent) < 10*dur))[0]
            if len(idx) == 0: # no usable data for this detector
                continue
            tcent = tcent[idx]
            tdur = specdata['EXPOSURE'][idx]
            counts = specdata['COUNTS'][idx][:,0]
            # rebin by histogramming
            binedges = np.linspace(-10*dur, 10*dur, 31)
            bcounts = np.histogram(tcent, bins=binedges, weights=counts)[0]
            btdur = np.histogram(tcent, bins=binedges, weights=tdur)[0]
            btcent = np.histogram(tcent, bins=binedges, weights=tcent*tdur)[0] / btdur
            plt.plot(btcent, bcounts/btdur, color='gray')
            # from scipy.interpolate import UnivariateSpline
            # s = UnivariateSpline(btcent, bcounts/btdur, w=btdur/np.sqrt(bcounts))
            # plt.plot(tnear - met, s(tnear - met), color='gray')
    plt.xlim(-10 * dur, 10 * dur)
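
# Hedged sketch (not used by the pipeline): the separation-angle math in
# sourceangles() in isolation. The angle is arccos of the dot product of unit
# vectors; the clip guards against roundoff pushing |dot| slightly past 1.
# In sourceangles() the (3,1) source vector broadcasts against a (3,N) track
# of Earth positions; the helper name and toy vectors below are illustrative.
def _angle_between_deg(u, v):
    dot = np.clip(np.sum(u * v, axis=0), -1., 1.)
    return np.arccos(dot) * 180. / np.pi

# example: _angle_between_deg(np.array([1., 0., 0.]),
#                             np.array([0.5, np.sqrt(3.)/2., 0.])) -> 60.0
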
def gbmfit(data, t, duration, dlist=dlist, channels=[0,1,2,3,4,5,6,7], poiss=False, plot=False,
           degree=None, fitsize=10., plotsize=5., fitqual=False, rebin=None, crveto=True,
           injection=None, fitconf=False, dqshade=None):
    import fit
    # split dlist and channels if passed as strings
    if type(dlist) is str:
        dlist = dlist.split()
    if type(channels) is str:
        channels = [int(c) for c in channels.split()]
    if rebin is None and duration > 1.025: # auto set rebin for long durations
        rebin = True
    (nd, nc) = (len(dlist), len(channels))
    if plot and dqshade is None:
        dqshade = np.zeros((nc, nd))
    fg = np.zeros((nc, nd))      # measured counts in fg window
    bg = np.zeros((nc, nd))      # background estimate in fg window
    goodfit = np.ones((nc, nd))  # boolean, True for good fit (low chisq)
    vbfit = np.zeros((nc, nd))   # variance in bg estimate due to fit parameter errors
    vbsys = np.zeros((nc, nd))   # variance in bg estimate due to systematic error from polynomial model
    xsqdof = np.zeros((nc, nd))  # chisq/DOF of bg fit
    if degree is None:
        degree = max(2, 1+np.ceil(np.log2(duration)/2.))
    if plot:
        import plots
        plots.importmpl()
        plt = plots.plt
        import itertools
        from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
        plt.clf()
        ymin = 1e6*np.ones((2,nc))
        ymax = np.zeros((2,nc))
    for (i, d) in enumerate(dlist): # i indexes detectors, j indexes channels
        ### grab data
        if data is None:
            data = loaddailydata(fermi2utc(t))
        specdata = data[d]['SPECTRUM'].data # look at data for detector
        tcent = (specdata['TIME'] + specdata['ENDTIME']) / 2 - t # relative mean time
        idx = np.nonzero((specdata['QUALITY'] == 0) & (specdata['EXPOSURE'] > 0)
                         & (np.abs(tcent) < fitsize*max(0.512, duration)))[0] # local data idx, at least 80 points
        if len(idx) == 0:
            return (None, None) if not fitqual else (None, None, None, None, None, None)
        tcent = tcent[idx]
        tdur = specdata['EXPOSURE'][idx] # exposure [s]
        counts = specdata['COUNTS'][idx][:,channels] # counts by channel
        if injection is not None:
            (injt, injdur, injrate) = injection
            injstart = injt - injdur/2.
            injend = injt + injdur/2.
            overlap = np.minimum(specdata['ENDTIME'][idx], injend) - np.maximum(specdata['TIME'][idx], injstart)
            overlap = overlap * (overlap > 0)
            counts += overlap[:,np.newaxis] * injrate[d][np.newaxis,:] # injrate for this detector, all channels, same index order as response matrix
        fgidx = np.nonzero(np.abs(tcent) <= duration/2.)[0] # foreground indices
        ### cosmic ray rejection; note this removes the first two and last two points
        if crveto:
            nearcounts = counts[:-4,:] + counts[1:-3,:] + counts[3:-1,:] + counts[4:,:] # nearby counts
            neartdur = tdur[:-4] + tdur[1:-3] + tdur[3:-1] + tdur[4:]
            nearflux = nearcounts / neartdur[:,np.newaxis]
            nearexpect = np.maximum(1, nearflux * tdur[2:-2, np.newaxis])
            nearsnr = (counts[2:-2,:]-nearexpect) / np.sqrt(nearexpect)
            nearoutliers = np.nonzero(np.any(nearsnr > 7., axis=1))[0] + 2 # exclude 7-sigma outliers from background
            nearbigoutliers = np.nonzero(np.any(nearsnr > 500./np.sqrt(duration), axis=1))[0] + 2 # exclude 500-sigma excess from foreground
            inneridx = np.nonzero((tcent >= -3*duration/2.) & (tcent <= 5*duration/2.))[0]
            goodidx = list(set(range(2,len(tcent)-2)) - set(np.hstack((nearbigoutliers, nearbigoutliers+1, nearbigoutliers-1)))
                           - (set(np.hstack((nearoutliers, nearoutliers+1, nearoutliers-1))) - set(inneridx)))
            goodidx.sort()
            tcent = tcent[goodidx]
            tdur = tdur[goodidx]
            counts = counts[goodidx,:]
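        # A hedged reading of the veto thresholds above: a 7-sigma excess over
        # the four-neighbor expectation drops a bin (and its immediate
        # neighbors) from the background fit, but bins in the inner window
        # [-3*duration/2, 5*duration/2] are spared so a genuine burst is not
        # vetoed; only a 500/sqrt(duration)-sigma excess (~500 sigma for
        # duration ~ 1 s) is extreme enough to be removed everywhere,
        # including the foreground.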
        bgidx = np.nonzero((tcent < -3*duration/2.) | (tcent > 5*duration/2.))[0] # background estimation indices
        fgidx = np.nonzero(np.abs(tcent) <= duration/2.)[0] # foreground indices
        if len(fgidx) == 0 or (len(fgidx) == 1 and tdur[fgidx[0]] > 1.5*duration):
            # skip: no foreground time, or not enough time resolution
            return (None, None) if not fitqual else (None, None, None, None, None, None)
        twin = [tcent[fgidx[0]]-tdur[fgidx[0]]/2., tcent[fgidx[-1]]+tdur[fgidx[-1]]/2.]
        ### fit each channel separately
        for j in range(nc):
            if rebin:
                binsize = max(0.256, duration / 4.)
                binedges = np.arange(twin[0]-(fitsize-0.5)*max(0.512, duration), tcent[-1], binsize)
                bcounts = np.histogram(tcent, bins=binedges, weights=counts[:,j])[0]
                btdur = np.histogram(tcent, bins=binedges, weights=tdur)[0]
                idx = np.nonzero(btdur > 0)[0]
                (bcounts, btdur) = (bcounts[idx], btdur[idx])
                bflux = bcounts / btdur
                btcent = np.histogram(tcent, bins=binedges, weights=tcent*tdur)[0][idx] / btdur
                bbgidx = np.nonzero((btcent < -duration) | (btcent > 2*duration))[0]
                bfgidx = np.nonzero(np.abs(btcent) <= duration/2.)[0]
                (ifit, tfit, cfit, dfit) = (bbgidx, btcent, bcounts, btdur)
            else:
                (ifit, tfit, cfit, dfit) = (bgidx, tcent, counts[:,j], tdur)
            if len(ifit) < degree+2: # require at least degree+2 points for background estimation
                return (None, None) if not fitqual else (None, None, None, None, None, None)
            # numpy 1.7 polyfit can weight by poisson error (1+sqrt(N)), accounting for duration since the fit is on rate(t):
            # (par, cov) = fit.polyfit(tfit[ifit], cfit[ifit]/dfit[ifit], degree, cov=True, w=dfit[ifit]/(1.+np.sqrt(cfit[ifit])))
            # we DO NOT use those weight estimates because they FAIL for low-N outliers (bad approximation)
            (par, cov) = fit.polyfit(tfit[ifit], cfit[ifit]/dfit[ifit], degree, cov=True)
            # if poiss: # poisson max-likelihood refinement to polyfit par
            #     import scipy.optimize
            #     par = scipy.optimize.fmin(fit.negloglikelihood, par, args=(np.polyval, tcent[bgidx], counts[bgidx,j], tdur[bgidx]), disp=False)
            #     cov = 0 # fmin is not going to give us parameter error estimates, so give up for now
            bgfit = dfit[ifit] * np.polyval(par, tfit[ifit])
            chisq = (cfit[ifit]-bgfit)**2/bgfit
            chisqdof = np.sum(chisq)/len(ifit)
            maxchisq = np.max(chisq)
            smallfit = False
            if chisqdof > 2.0 or maxchisq > (4**2 + 2*np.log(len(ifit))): # if fit is bad, try fitting a smaller interval
                smallfit = True
                jfit = ifit[np.abs(tfit[ifit]) <= (fitsize/2.)*max(0.512, duration)]
                if len(jfit) > degree+1: # require the smaller interval to have at least degree+2 points
                    ifit = jfit
                    (par, cov) = fit.polyfit(tfit[ifit], cfit[ifit]/dfit[ifit], degree, cov=True)
                    # if poiss:
                    #     par = scipy.optimize.fmin(fit.negloglikelihood, par, args=(np.polyval, tcent[bgidx], counts[bgidx,j], tdur[bgidx]), disp=False)
                    #     cov = 0
                    bgfit = dfit[ifit] * np.polyval(par, tfit[ifit])
                    chisq = (cfit[ifit]-bgfit)**2/bgfit
                    chisqdof = np.sum(chisq)/len(ifit)
                    maxchisq = np.max(chisq)
            if chisqdof > 3.0 or maxchisq > (5**2 + 2*np.log(len(ifit))): # if fit is still bad, give up and mark bad background
                goodfit[j,i] = 0
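            # For scale: with 100 fit points, the first-pass outlier ceiling
            # 4**2 + 2*log(100) is ~25.2, i.e. a single ~5.0-sigma bin fails
            # the fit, while the post-retry ceiling 5**2 + 2*log(100) is ~34.2
            # (~5.9 sigma), so the smaller-interval refit is judged against a
            # looser budget.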
            xsqdof[j,i] = chisqdof
            bg[j,i] = np.sum(tdur[fgidx] * np.polyval(par, tcent[fgidx])) # expected background counts in fg region
            fg[j,i] = np.sum(counts[fgidx,j]) # accumulated fg counts
            lsqvars = tcent[fgidx]**np.arange(degree, -.1, -1)[:,np.newaxis] # e.g. [t**2, t, 1] dependent vars for the leastsq fit
            vbfit[j,i] = np.sum(tdur[fgidx])**2 * np.mean(lsqvars * np.dot(cov, lsqvars)) # variance contribution from parameter fit errors
            # note this is the variance of the rate; to get the variance of counts, we multiply by duration**2
            vbsys[j,i] = np.sum(tdur[fgidx]) * np.sum(cfit[ifit]) / np.sum(dfit[ifit]) * max(0., chisqdof-1.)
            # we try to estimate the contribution to the variance of the bg estimate from systematic error in the fit model.
            # we don't really know this, so we use the average systematic error from the data instead:
            # take average rate * duration in fg = representative counts in the fg duration, and multiply by the typical excess std**2.
            # for example, with average expected counts of 100 (statistical variance 100) but chisq/dof of 4, we expect
            # systematics to contribute an excess variance of 300 on top of the statistical 100, for a total variance of 400.
            if plot: # lots of code to make a nice set of plots, if desired
                width = min(plotsize, fitsize/2.) if smallfit else min(plotsize, fitsize)
                x = np.linspace(-width*duration, width*duration, 512)
                y = np.polyval(par, x) # background rate curve
                lsqvars = x[np.newaxis,:]**np.arange(degree, -.1, -1)[:,np.newaxis]
                var = np.sum(lsqvars * np.dot(cov, lsqvars), axis=0) # prediction error
                std = np.sqrt(var)
                flux = counts[:,j]/tdur
                err = np.sqrt(tdur * np.polyval(par, tcent))/tdur
                fgdur = np.sum(tdur[fgidx])
                fgt = np.mean(tcent[fgidx])
                snr = (fg[j,i]-bg[j,i]) / np.sqrt(bg[j,i])
                plt.subplot(nc, nd, 1+j*nd+i)
                if goodfit[j,i] == 0:
                    plt.gca().set_axis_bgcolor('0.8')
                if dqshade[j,i] != 0:
                    plt.gca().set_axis_bgcolor('0.6')
                # we plot x offset by duration/2 because all times are shifted to tstart=0 in the plot
                plt.plot(x+duration/2., y, 'b-')
                plt.fill_between(x+duration/2., y+std, y-std, alpha=0.5, color='b') # 1 sigma
                if rebin:
                    berr = np.sqrt(btdur * np.polyval(par, btcent))/btdur
                    pidx = bbgidx[np.abs(btcent[bbgidx]) <= plotsize*duration]
                    plt.errorbar(btcent+duration/2., bflux, yerr=berr, fmt='.', color='gray') # again shifting so 0=tstart
                    plt.errorbar(btcent[pidx]+duration/2., bflux[pidx], yerr=berr[pidx], fmt='.', color='blue')
                    plt.errorbar(btcent[bfgidx]+duration/2., bflux[bfgidx], yerr=berr[bfgidx], fmt='.', color='green', zorder=4)
                else:
                    pidx = bgidx[np.abs(tcent[bgidx]) <= plotsize*duration]
                    plt.errorbar(tcent+duration/2., flux, yerr=err, fmt='.', color='gray')
                    if len(pidx) > 0:
                        plt.errorbar(tcent[pidx]+duration/2., flux[pidx], yerr=err[pidx], fmt='.', color='blue')
                    plt.errorbar(tcent[fgidx]+duration/2., flux[fgidx], yerr=err[fgidx], fmt='.', color='green', zorder=4)
                plt.errorbar(fgt+duration/2., fg[j,i]/fgdur, yerr=np.sqrt(bg[j,i])/fgdur, fmt='.', color='red', zorder=4)
                plt.xlim([(-plotsize+0.5)*duration, (plotsize+0.5)*duration])
                print (d, channels[j])
                # for some reason AnchoredText started needing an explicit prop dict(), toolkit bug
                at = AnchoredText("%s:%d" % (d, channels[j]), loc=2, pad=0.3, borderpad=0.3, prop=dict())
                at.patch.set_boxstyle("square,pad=0.")
                plt.gca().add_artist(at)
                at = AnchoredText("SNR = %.1f" % snr, loc=1, pad=0.3, borderpad=0.3, prop=dict())
                at.patch.set_boxstyle("square,pad=0.")
                plt.gca().add_artist(at)
                # track shared y-limits separately for NaI ('n*') and BGO ('b*') detectors
                ymin[d[0] == 'n',j] = min(ymin[d[0] == 'n',j], plt.ylim()[0])
                ymax[d[0] == 'n',j] = max(ymax[d[0] == 'n',j], plt.ylim()[1])
                if i == 0: # first detector
                    plt.ylabel('flux [counts/s]')
                elif i == nd-1: # last detector
                    plt.gca().yaxis.set_label_position('right')
                    plt.gca().yaxis.set_ticks_position('right')
                    plt.gca().yaxis.set_ticks_position('both')
                    plt.ylabel('flux [counts/s]')
                else:
                    plt.gca().yaxis.set_ticklabels([])
                if j == nc-1: # last channel
                    plt.xlabel('relative time [s]')
                elif j == 0: # first channel
                    plt.gca().xaxis.set_ticks_position('top')
                    plt.gca().xaxis.set_ticks_position('both')
                else:
                    plt.gca().xaxis.set_ticklabels([])
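    # At this point fg, bg, goodfit, vbfit, vbsys and xsqdof are filled for
    # every (channel, detector) pair. A hedged note on downstream use (not
    # done in this function): folding the fit variances into the significance,
    # e.g. (fg - bg) / np.sqrt(bg + vbfit + vbsys), reduces to the plotted
    # SNR = (fg - bg) / np.sqrt(bg) when the fit errors are negligible.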
    if plot:
        for (i, j) in itertools.product(range(nd), range(nc)):
            plt.subplot(nc, nd, 1+j*nd+i)
            plt.ylim(ymin[dlist[i][0] == 'n',j], ymax[dlist[i][0] == 'n',j])
            if j != 0: # not first channel
                tics = plt.gca().yaxis.get_ticklocs()
                if tics[-1] == plt.ylim()[-1]:
                    plt.gca().yaxis.set_ticks(tics[:-1])
        plt.suptitle('GBM detectors at ' + fermi2utc(t-duration/2.).strftime(fmtlist[4])[:-3] + ' +%0.3fs' % duration, va='center')
        # non-zero hspace and wspace to avoid a matplotlib <1.0 bug with disappearing subplots
        plt.subplots_adjust(left=1./(2+nd*4), right=1-1./(2+nd*4), top=1-1./(4+nc*3), bottom=1./(4+nc*4), hspace=0.0015, wspace=0.0015)
        plt.setp(plt.gcf(), figheight=1+3*nc, figwidth=2+3*nd)
    # fg: fg counts (signal+bg), bg: bg estimate, goodfit: whether the fit is good (bool)
    # vbfit: statistical variance in the bg estimate, vbsys: systematic variance in the bg estimate
    return (fg, bg) if not fitqual else (fg, bg, goodfit, vbfit, vbsys, xsqdof)
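
# Hedged usage sketch (assumptions: `met` is a Fermi MET trigger time in
# seconds and the daily files are available to loaddailydata(); the detector
# list, channels, and duration below are illustrative). The helper name
# _gbmfit_example is hypothetical and not part of the pipeline.
def _gbmfit_example(met, duration=1.024):
    data = loaddailydata(fermi2utc(met))
    (fg, bg, goodfit, vbfit, vbsys, xsqdof) = gbmfit(
        data, met, duration, dlist='n0 n1 n2', channels=[1, 2, 3],
        fitqual=True, plot=False)
    if fg is None: # no usable data, or background estimation was impossible
        return None
    snr = (fg - bg) / np.sqrt(bg + vbfit + vbsys) # fold fit errors into the significance
    return snr * goodfit # zero out channels whose background fit was bad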