Example #1
# Note: this is an excerpt; dashi, numpy, pylab and VisualCutter are
# provided by the surrounding test module.
import dashi as d
import numpy as n
import pylab as p

def test():
    sig = d.bundle(var1=n.random.normal(2, 2, int(1e5)), var2=n.random.normal(1, 1, int(1e5)))
    bg  = d.bundle(var1=n.random.normal(0, 1, int(1e4)), var2=n.random.normal(-1, 2, int(1e4)))
    vars = d.bundle(sig=sig, bg=bg).transpose()
    weights = d.bundle(sig=n.ones(int(1e5)), bg=n.ones(int(1e4)))

    hist1d = d.bundleize(d.factory.hist1d)
    d.visual()

    def initfunc(vc,vars,weights,mask):
        vc.myfig = p.figure()
        p.figure(vc.myfig.number)
        h1 = hist1d( vars.var1[mask], n.linspace(-20,20,101), weights[mask])
        c = d.bundle(sig="r", bg="k")
        h1.line(c=c)

    def updatefunc(vc,vars,weights,mask):
        vc.myfig.clear()
        p.figure(vc.myfig.number)
        h1 = hist1d( vars.var1[mask], n.linspace(-20,20,101), weights[mask])
        c = d.bundle(sig="r", bg="k")
        h1.line(c=c)
        vc.myfig.canvas.draw()

    def anyfunc(*args):
        print(args)

    # VisualCutter is defined in the surrounding test module
    c = VisualCutter(vars, weights, "(vars.var1 > %(var1)s) & (vars.var2 < %(var2)s)", initfunc, updatefunc)
    c.run()
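    # NOTE (not part of the original test): the cut string above is an
    # old-style %(name)s template. A minimal sketch of how such a template
    # could be turned into a boolean mask (VisualCutter's actual internals
    # are not shown in this excerpt):
    def apply_cut(template, variables, values):
        # e.g. template % {"var1": 0.5, "var2": 3.0}
        #   -> "(vars.var1 > 0.5) & (vars.var2 < 3.0)"
        expr = template % values
        # evaluate against the variable bundle to obtain a boolean mask
        return eval(expr, {"vars": variables})

    # mask = apply_cut("(vars.var1 > %(var1)s) & (vars.var2 < %(var2)s)",
    #                  vars, {"var1": 0.5, "var2": 3.0})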
Example #2
def hist_energy_losses(self, frame, reco_prefix, **kwargs):
    import dashi as d
    d.visual()
    import numpy as n
    elosses = reco_prefix + 'CascadeEnergyLosses'
    if frame.Has(elosses):
        eloss = n.array(frame[elosses])
        h = d.histfactory.hist1d(n.log10(eloss), n.linspace(1, 5, 17))
        h.line()
Example #3
def test_plotting():
    import matplotlib as mpl
    mpl.use("agg")

    import os
    import numpy as n
    import dashi as d
    import pylab as p
    d.visual()
    nbins = 50
    bins = n.linspace(-10, 10, nbins + 1)
    sample = n.random.normal(2, 0.2, int(1e5))
    weights = n.random.normal(1, 0.3, int(1e5))
    weights[weights < 0] = 0
    hist = d.histogram.hist1d(bins)
    hist.fill(sample, weights)

    hist.scatter()
    #mod = hist.leastsq(d.gaussian(), verbose=False)
    #p.plot(hist.bincenters, mod(hist.bincenters))
    #hist.statbox(loc=1)
    #mod.parbox(loc=2)
    fname = "/tmp/testfigure.png"
    p.savefig(fname)
    assert os.path.exists(fname)
    os.remove(fname)
Example #4
File: utils.py Project: wardVD/IceSimV05
def plot_radial_slice(h, spline, slice_=(3, 10, 1), **kwargs):
    import pylab
    import numpy
    import dashi
    dashi.visual()
    from icecube.photospline.glam.glam import grideval

    idx = slice_ + (slice(None), )
    sub = h[idx]
    idx = tuple([s - 1 for s in slice_]) + (slice(None), )

    coords = []
    for i, s in enumerate(idx):
        axis = h._h_bincenters[i][s]
        try:
            len(axis)
            #axis = numpy.linspace(axis[0], axis[-1], 101)
            axis = numpy.linspace(0, 250, 1001)
            # print axis
        except TypeError:
            axis = [axis]
        coords.append(axis)
    sub.scatter(**kwargs)
    print(grideval(spline, coords).flatten())
    pylab.plot(coords[-1], grideval(spline, coords).flatten(), **kwargs)
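# NOTE: hypothetical usage (h and spline are not constructed in this excerpt;
# h would be a 4-dimensional dashi histogram, spline a photospline fit to it).
# kwargs are forwarded to both the histogram scatter and the spline curve:
#
#     plot_radial_slice(h, spline, slice_=(3, 10, 1), color="k")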
Example #5
#!/usr/bin/env python

#=========================================================================
# File Name     : angular-res.py
# Description   :
# Creation Date : 04-26-2016
# Last Modified : Tue 26 Apr 2016 03:48:09 PM CDT
# Created By    : James Bourbeau
#=========================================================================

import os
import sys
sys.path.append(os.path.expanduser("~/dashi"))  # "$HOME" is not expanded by sys.path
import dashi
dashi.visual()  # This is needed to display plots, don't ask me why
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import argparse

import myGlobals as my
import simFunctions_IT as simFunctions
from usefulFunctions import checkdir
import colormaps as cmaps
from geo import loadGeometryTxt
from plotFunctions import diverge_map
from ShowerLLH_scripts.analysis.load_sim import load_sim

def core_res(data,cuts):
    MC_x = data['MC_x']
    MC_y = data['MC_y']
    MC_r = np.sqrt(MC_x**2+MC_y**2)[cuts]
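    # NOTE: the excerpt ends here; a hypothetical continuation (the 'ML_x'/'ML_y'
    # keys are assumptions, not from the source) histograms the core displacement:
    reco_r = np.sqrt(data['ML_x']**2 + data['ML_y']**2)[cuts]
    h_res = dashi.factory.hist1d(reco_r - MC_r, np.linspace(-50, 50, 101))
    h_res.line()
    plt.xlabel('core displacement [m]')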
Example #6
"""
 use of 2d histograms for correlation studies
"""
import numpy as n
import dashi as d
import pylab as p

d.visual()

npoints = int(1e5)  # numpy requires an integer sample size
y = n.random.uniform(0, 10, npoints)
x = 2 * y + 3 + n.random.normal(0, 2, npoints)

h = d.factory.hist2d((x, y), (n.linspace(0, 30, 101), n.linspace(0, 10, 11)),
                     labels=("x", "y"))

p.figure(figsize=(9, 9))
p.subplots_adjust(wspace=.25)

# simple imshow of bincontent array
p.subplot(221)
h.imshow()

# profile plots. project on dimension 1 ("y")
p.subplot(222)
scatterpoints = d.histfuncs.h2profile(h, dim=1)
scatterpoints.scatter()

# stacked 1d histograms plots
# keeping the overall shape of the projected distribution
p.subplot(223)
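# NOTE: the example is truncated after p.subplot(223); a plausible
# continuation (a sketch, not the original code) stacks the ten y-slices
# of h as individual 1d histograms of x, using dashi's 1-based bin slicing:
cmap = p.get_cmap("viridis")
for yi in range(1, 11):                # visible y bins of n.linspace(0, 10, 11)
    h[:, yi].line(c=cmap(yi / 10.0))   # 1d histogram of x within the yi-th y bin
p.xlabel("x")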
Example #7
import os
import pickle

import numpy as np
from pylab import *
from scipy import optimize
from scipy.optimize import curve_fit
from scipy.stats import chisquare
from operator import itemgetter

from ssm.core import pchain
from ssm.core.util_pmodules import Aggregate, SimpleInjector
from ssm.pmodules import *
from ssm.pmodules import Reader
from CHECLabPy.plotting.camera import CameraImage

import dashi
dashi.visual()

#def extract(lst):
#    return(lst[i][0] for i in range(len(lst)))
#    
def invf(y,a,b,c):
    return np.nan_to_num(-(b-np.sqrt(b*b -4*a*(c-y)))/(2*a))
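# Quick self-check (illustrative only, made-up coefficients): invf inverts
# y = a*x**2 + b*x + c, taking the root (sqrt(b**2 - 4*a*(c-y)) - b) / (2*a).
_a, _b, _c = 0.5, 2.0, 1.0
_x = np.linspace(0.0, 10.0, 5)
assert np.allclose(invf(_a * _x**2 + _b * _x + _c, _a, _b, _c), _x)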

current_path = '/home/wolkerst/projects/cta/SSM-analysis'
property_path = os.path.join(current_path, "fit_parameters")

# =============================================================================
# # Load fit parameters
# =============================================================================
with open(property_path, 'rb') as handle:
    props = pickle.load(handle)
Example #8
"""
    an example to illustate the use of d.histfuncs.histratio
    to compare histograms with each other
"""
import numpy as n
import dashi as d
import pylab as p

d.visual()

# create histograms of two normal distributions, one of which
# has a small extra peak at x=0;
# h1 has about twice as many entries as h2
bins = n.linspace(-10, 10, 101)
h1a = d.factory.hist1d(n.random.normal(2.0, 2, int(2e5)), bins)
h1b = d.factory.hist1d(n.random.normal(0.0, .5, int(1e4)), bins)
h1 = h1a + h1b
h2 = d.factory.hist1d(n.random.normal(2.0, 2, int(1e5)), bins)

# first figure: just plot the histograms
p.figure(figsize=(8,4))
h1.line(c="r")
h1a.line(c="r", linestyle=":")
h1b.line(c="r", linestyle=":")
h2.line(c="b")
p.axvline(0, color="k", linestyle="dashed")
h1.statbox(loc=1, edgecolor="r")
h2.statbox(loc=2, edgecolor="b")

# second figure:
# calculate the ratio of the two histograms, i.e. h1/h2
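# NOTE: the example is truncated here; a hedged sketch of the second figure,
# assuming d.histfuncs.histratio(h1, h2) returns a point collection with a
# .scatter() method (consistent with the docstring, not the original code):
p.figure(figsize=(8, 4))
ratio = d.histfuncs.histratio(h1, h2)        # bin-by-bin ratio h1/h2
ratio.scatter()
p.axhline(2, color="k", linestyle="dashed")  # expected ratio away from the x=0 peak
p.ylabel("h1 / h2")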
Example #9
"""
   fitting a  parabola using dashi.scatterpoints.points2d as 
   a data container
"""
import dashi as d; d.visual()
import numpy as n
import pylab as p

yerr = 10.0
data = d.scatterpoints.points2d()
data.x = n.linspace(-10,10,21)
data.y = 2*(data.x-2)**2 - 3 + n.random.normal(0,yerr,len(data.x))
data.yerr = yerr *  n.ones(len(data.x)) # initial guess for the error

mod = d.poly(2)
mod = d.fitting.leastsq(data.x,data.y,mod, chi2values=True)

p.figure(figsize=(9,4))
p.subplots_adjust(wspace=.25)

p.subplot(121)
data.scatter(fmt="ko", ms=3)
p.plot(data.x, mod(data.x), "r--")
mod.parbox(loc=1)

ax = p.subplot(122)
p.plot(mod.chi2values[0], mod.chi2values[1], "k-")
p.text(0.1, 0.9, r"$\chi^2/\mathrm{ndof} = %.2f/%d$" % (mod.chi2, mod.ndof), transform=ax.transAxes)
p.ylabel(r"$\chi^2$ contribution")

Example #10
import pylab
import numpy
import dashi; dashi.visual()
from utils import load_group, colorize, points

fluxlabel = r'$d\Phi/dE\,\,[1/(\mathrm{GeV}\,\mathrm{m}^2\,\mathrm{sr}\,\mathrm{s})]$'
def log_fluxlabel(edges):
	d = numpy.diff(numpy.log(edges))[0]
	return r'$%.2f \times d\Phi/d\log_{10}(E/\mathrm{GeV})\,\,[1/(\mathrm{m}^2\,\mathrm{sr}\,\mathrm{s})]$' % d

def plot_single_energy(histogram_fname, flux, efit, normed=False, log=True):
	
	h = load_group(histogram_fname, 'energy')[:,:,1,:,:].project([0,1,3])
	if normed:
		# dP/dE
		norm = h._h_bincontent.sum(axis=2).reshape(h._h_bincontent.shape[:-1] + (1,))
	else:
		# dPhi/dE
		norm = numpy.diff(numpy.cos(h._h_binedges[0][::-1])).reshape((h._h_bincontent.shape[0],) + (1,)*(h.ndim-1))	
	
	h._h_bincontent /= norm
	h._h_squaredweights /= norm*norm
	
	fig = pylab.figure()
	fig.subplots_adjust(left=0.15)
	di = 5
	for color, zi in colorize(list(range(1, 11))):
		sub = h[zi,di,:]
		zenith = h._h_bincenters[0][zi-1]
		ct = numpy.cos(zenith)
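		# NOTE: the loop body is truncated in the original; a plausible
		# continuation (assumed, not from the source) scatters each slice:
		sub.scatter(color=color, label=r'$\cos\theta = %.2f$' % ct)
	pylab.loglog()
	pylab.xlabel('E [GeV]')
	pylab.ylabel(fluxlabel)
	pylab.legend()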
Example #11
def compute_sensitivity():
    import pylab
    import matplotlib.pyplot as plt
    import numpy as np

    # ...or we load some expectations
    (sig, bg, x) = load_expectations()
    # Plotting the pdfs for fun ;)


    #Here we set the number of events in our fake analysis using a
    #binned shape likelihood
    N = 7060
    llh = setup_llh(sig, bg, x,  N, 1)
    #Since it's a binned likelihood we can histogram the events to
    #speed up llh evaluations
    llh.EnableHistogramedEvents()
    llh.EnablePoissonSampling()
    #setting up the analysis
    analysis = MLSandboxPythonAccess.FeldmanCousinsAnalysis(llh = llh,
                                                            cl = 0.9, #confidence level (not really important yet)
                                                           )
    plt.show()
    #Compute rank distributions for different values of the llh parameter xi as set up above
    #n_threads key argument sets number of threads to use during this step
    #analysis.ComputeRanks( n_experiments = 10000, #Number of pseudo experiments (trials)
    #                       min_xi = 0.0, #lower boundary of likelihood parameter for rank calculation
    #                       max_xi = 100.0/N, #upper boundary likelihood parameter for rank calculation
    #                       n_steps = 100, #number of steps
    #                       n_threads = 6)

    #since computing the ranks distributions takes a lot of CPU time/resources
    #analysis.ranks.save("ranks_example.dat")
    ranksobj = MLSandboxPythonAccess.FCRanks()
    ranksobj.load("ranks_example2.dat")
    #Python bindings are still missing for the function that makes an ensemble of
    #pseudo experiments and computes limits, so to determine the median upper
    #limit when assuming bg only we do it explicitly in python for now.
    up_lim = list()
    down_lim = list()
    analysis.SetFCRanks(ranksobj)
    analysis.ranks.SetConfidenceLevel(0.9)
    analysis.Sample(0)
    #Compute FC limits for this particular experiment
    (up, down) = analysis.ComputeLimits()

    import dashi
    dashi.visual()
    ranks = ranksobj.get_ranks()
    bedges = (np.linspace(0,200.0,100),np.linspace(-1,10,100))
    rankhist = dashi.histogram.hist2d(binedges = bedges)
    for k in ranks.keys():
        rankhist.fill((np.ones(len(ranks[k]))*float(k)*N,ranks[k] ))

    xi = np.linspace(0,200.0/N,100)
    ts = []
    critB = []
    for x in xi:
        ts.append(analysis.EvaluateTestsStatistic(x))
        critB.append(analysis.ranks.rCB(x))
    print(ts)
    print(critB)
    plt.figure()
    rankhist.imshow(log=True)
    plt.plot(xi*N,critB,'k')
    #plt.plot(xi*N,ts,'k')
    analysis.Sample(20.0/N)
    ts = []
    for x in xi:
        ts.append(analysis.EvaluateTestsStatistic(x))
    #plt.plot(xi,ts,'k')
    plt.colorbar()
    plt.show()