Code Example #1
from collections import defaultdict

import matplotlib.pyplot as plt
import numpy as np

# `read` is a project-local module providing read_detection_file().

def graph_cdf_num_overlap_routes_detection(infos):

    overlaps_in_detection = defaultdict(lambda: 0)
    num_detection = 0
    
    for node in infos["nodes"]:
        print(node)
        detection_file = infos["database_dir"] + "/changes." + node
        detections_by_id = read.read_detection_file(detection_file)
        
        for detection_id,detection in detections_by_id.items():

            if(detection["changes_old"] == []):
                continue

            overlaps_in_detection[len(detection["overlap"])] += 1
            num_detection += 1

    x = sorted(overlaps_in_detection)  # dict views have no .sort() in Python 3
    y = [overlaps_in_detection[i]/float(num_detection) for i in x]
    x_norm = [(z/1000.00) for z in x]
    cdf = np.cumsum(y)

    plt.step(x_norm,cdf, where="post")

    plt.xlabel("Fraction of paths that overlap the detection",fontsize=16)
    plt.ylabel("CDF of number of detections",fontsize=16)

    plt.ylim(0.0, 1.0)
    plt.savefig('out_cdf_num_overlap.pdf')
    plt.clf()
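
The pattern shared by the CDF examples in this listing (count occurrences, sort the unique values, normalize, cumulative-sum, then plt.step with where="post") can be distilled into a small self-contained helper. A minimal sketch; the helper name plot_ecdf_step and the sample data are illustrative, not project code:

import numpy as np
import matplotlib.pyplot as plt

def plot_ecdf_step(values, label=None):
    values = np.asarray(values)
    x = np.unique(values)                         # sorted unique values
    counts = np.array([(values == v).sum() for v in x], dtype=float)
    cdf = np.cumsum(counts) / values.size         # running fraction of samples <= x
    plt.step(x, cdf, where="post", label=label)

plot_ecdf_step(np.random.rand(1000))
plt.ylim(0.0, 1.0)
plt.show()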
Code Example #2
from os import listdir

import matplotlib.pyplot as plt
import numpy as np

def graph_cdf_route_coverage(infos):
    
    evaluation_2_files = [x for x in listdir(infos["output_dir"])
        if x.find("evaluation_2") != -1]
    all_dist = list()

    all_number = 0
    routes_num = 0
    for f in evaluation_2_files:
        print(f)
        for line in open(infos["output_dir"]+"/"+f,"r"):
            line = line.strip().split(" ")
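            # Field 3 holds per-LCZ "size,intersect" pairs separated by ";";
            # "0,0" appears to mark a route with no LCZs.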
            if(line[3] == "0,0"):
                #all_dist.append(1.0)
                continue
            routes_num += 1
            lcz_stats = line[3].split(";")
           
            covered = 0
            for i,stat in enumerate(lcz_stats):
                lcz_size,lcz_intersect = stat.split(",")
                lcz_size = int(lcz_size)
                lcz_intersect = int(lcz_intersect)
                if(lcz_intersect > 0):
                    covered += 1
            all_dist.append(float(covered)/len(lcz_stats))
    
    uniq_values = list(set(all_dist))
    uniq_values.sort()
    x = uniq_values
    y = [all_dist.count(i)/float(routes_num) for i in x]
    cdf = np.cumsum(y)


    plt.step(x,cdf, where="post")

    plt.xlabel("% LCZ Covered",fontsize=16)
    plt.ylabel("CDF of all overlapping changed routes",fontsize=16)

    plt.ylim(0.0, 1.0)
    plt.savefig('out_cdf_route_coverage.pdf')
    plt.clf()
Code Example #3
from os import listdir

import matplotlib.pyplot as plt
import numpy as np

# `path` is a project-local module providing hops_fromstr().

def graph_cdf_lcz_intersect_relation(infos):
    
    intersectionpp_files = [x for x in listdir(infos["output_dir"])
        if x.find("intersectionspp") != -1]

    relation = list()
    intersect_null = 0
    for f in intersectionpp_files:
        print(f)
        for line in open(infos["output_dir"]+"/"+f,"r"):
            
            line_parts = line.split(" ")
            for lcz in line_parts[3].split("#"):
                lcz_old, lcz_new, intersect = lcz.split(";")
                if(intersect == ""):
                    intersect_null += 1
                    continue
                lcz_old = set([",".join(x) for x in path.hops_fromstr(lcz_old)]) 
                intersect = set([",".join(x) for x in path.hops_fromstr(intersect)]) 
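                # "255.255.255.255" is the placeholder for non-responding hops
                # ("stars"), so it is excluded from both sets.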
                lcz_old -= set(["255.255.255.255"])
                intersect -= set(["255.255.255.255"])
                len_lcz_old = len(lcz_old)
                len_lcz_intersect = len(intersect)
                relation.append(float(len_lcz_intersect)/len_lcz_old)

    total_values = float(len(relation))
    uniq_values = list(set(relation))
    uniq_values.sort()
    x = uniq_values
    y = [relation.count(i)/total_values for i in x]
    cdf = np.cumsum(y)

    plt.step(x,cdf, where="post")

    plt.xlabel("intersect_size/detection_lcz_old",fontsize=16)
    plt.ylabel("CDF of all overlap between detection and routes",fontsize=16)

    plt.ylim(0.0, 1.0)
    plt.xlim(0.0, 1.0)
    plt.savefig('out_lcz_intersect_relation.pdf')
    plt.clf()
Code Example #4
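    # Fragment from a longer routine: Int, phInt, binsS, histS, guessIc, pdfs,
    # stats (scipy.stats), target, i, j, shortT, basePath, wilks, intTimes and
    # ratios are all defined earlier in the original file.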
    guessIs = np.mean(Int)*0.3
    guessLam = np.mean(Int)
    fitIc, fitIs = pdfs.fitMR(binsS,histS,guessIc, guessIs)
    fitMR = pdfs.modifiedRician(binsS,fitIc,fitIs)
    fitLam = pdfs.fitPoisson(binsS,histS,guessLam)
    fitPoisson = pdfs.poisson(binsS,fitLam)
    fitMu, fitSig = pdfs.fitGaussian(binsS, histS, guessLam, np.std(Int))
    fitGaussian = pdfs.gaussian(binsS,fitMu,fitSig)

    sww, swp = stats.shapiro(phInt)
    print(stats.shapiro(phInt))
    print("Shapiro-Wilk W, p = %3.3f, %3.3f" % (sww, swp))
    print("Ic, Is = %3.3f, %3.3f" % (fitIc, fitIs))
    print("Ic/Is = %3.3f" % (fitIc / fitIs))

    plt.step(binsS,histS,color="grey",label=r"Histogram of intensities",where="mid")
    plt.plot(binsS,fitMR,color="black",linestyle="-.",label=r"MR fit to histogram: Ic=%2.2f, Is=%2.2f"%(fitIc, fitIs))
    #plt.plot(binsS,fitPoisson,linestyle="--",color="black",label=r"Poisson fit to histogram: $\lambda$=%2.2f"%(fitLam))
    plt.plot(binsS,fitGaussian,linestyle=":",color="black",label=r"Gaussian fit to histogram: $\mu$=%2.2f, $\sigma$=%2.2f"%(fitMu,fitSig))
    plt.legend()
    plt.xlabel(r"Intensity",fontsize=14)
    plt.ylabel(r"Probability",fontsize=14)
    plt.title(r"Intensity Distribution, %s pixel (%i,%i), t=%i ms"%(target,i,j,shortT*1000),fontsize=14)
    #plt.show()
    plt.clf()
    #plt.savefig(basePath+'%i_%i_%ims_Dist.png'%(i,j,shortT*1000))
    wilks.append(sww)
    intTimes.append(shortT)
    ratios.append(fitIc/fitIs)

Code Example #5
import numpy as np
import matplotlib.pyplot as plt
# The snippet was auto-translated from MATLAB (hence the matcompat shim) and left
# tf/c2d/length/stmcb_test undefined. The python-control package (assumed
# installed) supplies tf/c2d equivalents, so the shim is no longer needed.
import control

# Continuous-time transfer function 1/(s^2 + 2s + 3), discretized at Ts = 10 ms.
syst_fake = control.tf([1.], [1., 2., 3.])
syst_fake_dis = control.c2d(syst_fake, 0.01)

# Step response of the discrete system (MATLAB's step(), not matplotlib's plt.step).
t, output = control.step_response(syst_fake_dis)
plt.step(t, output, where='post')
plt.show()

# Unit-step input of matching length for the Steiglitz-McBride identification below.
input_sig = np.ones(len(output))
# stmcb_test was a local port of MATLAB's stmcb(); it is not a standard Python API,
# so the identification and the remaining MATLAB lines stay commented out.
# num, den = stmcb_test(output, input_sig, 0., 2.)
# sys_model = tf(num, den, 0.01)
# step(sys_model); hold on; step(syst_fake)
Code Example #6
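# Fragment of a larger plotting script: gs (a GridSpec), mDir, charge_scale_factor,
# textFS, ff and ef (fit helpers), and the project-local `colors` module are defined
# earlier in the original file; matplotlib.pyplot is imported as plt, numpy as np.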
gs.update(left=0.085, right=0.89, top=0.88, bottom=0.115, wspace=0.02*9/12.0, hspace=0.02)

ax = []

ax.append(plt.subplot(gs[0,0]))
ax.append(plt.subplot(gs[1,0]))
ax.append(plt.subplot(gs[0,1]))
ax.append(plt.subplot(gs[1,1]))

# Plot Cs-137 spectrum
plt.sca(ax[0])
dataset = 'Cs-Plastic/'
onset,full,tail = np.loadtxt(mDir + dataset + 'EJ-0000',unpack=True)
n,b = np.histogram(full,120,[0,1.2])
x = b[:-1] + 0.5*np.diff(b)
plt.step(x*charge_scale_factor,n/1000.0,where='mid',color=colors.black)
plt.text(0.66*charge_scale_factor,3.05,'477 keV',va='center',ha='right',fontsize=textFS,color=colors.black)

c = (x > 0.45) * (x < 0.75)
p0 = [100,0.6,0.05,-100,0]
x2,pars,xfit,yfit = ef.arbFit(ff,x[c],n[c],'Poisson',p0)
print(pars[0][1], pars[2][1], pars[0][2])
plt.axvline(pars[0][1]+pars[0][2],c='k',ls='dashed')
plt.plot(xfit*charge_scale_factor ,yfit/1000.0,color=colors.red,lw=2)
plt.yticks([0,1,2,3])
plt.xlim(0,0.9)
plt.ylim(0,3.5)
plt.xticks([0,0.2,0.4,0.6,0.8])
plt.text(0.075,0.275,r'$^{137}$Cs',ha='left',va='center',fontsize=textFS)
plt.tick_params(axis='x', labelbottom=False, labeltop=True)  # booleans, not 'off'/'on', on Matplotlib 3+
ax[0].xaxis.set_label_position("top")
Code Example #7
File: quadcopter_UKF.py Project: wuyou33/craz-ject
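    # Fragment of the simulation loop: xk, xhatkm1, ukm1, H, param, P, Q, R, ii,
    # timevec, omegavec, states, measurements, estimates and discrete_UKF_update()
    # are defined earlier in quadcopter_UKF.py.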
    vk = np.dot(R, np.random.randn(12, 1))
    zk = xk + vk
    #zk = yk + vk

    xhatk, P = discrete_UKF_update(xhatkm1, ukm1, zk, quadcopter_dynamics, H,
                                   param, P, Q, R)

    omegavec[0:4, ii:ii + 1] = ukm1
    states[0:12, ii:ii + 1] = xk
    measurements[0:12, ii:ii + 1] = zk
    estimates[0:12, ii:ii + 1] = xhatk

# Visualize simulation
if 1:  # Omega
    plt.figure(1)
    plt.step(np.transpose(timevec), np.transpose(omegavec))
    plt.legend([r'$\omega_1$', r'$\omega_2$', r'$\omega_3$', r'$\omega_4$'])
    plt.xlabel('Time, [s]')
    plt.ylabel('Angular velocity, [rad/s]')

if 1:  # States
    plt.figure(2)
    f, axarr = plt.subplots(2, 2)
    axarr[0, 0].step(np.transpose(timevec), np.transpose(states[0:3, :]))
    axarr[0, 0].set_xlabel('Time, [s]')
    axarr[0, 0].set_ylabel(r'$\mathbf{r}(t)$')
    axarr[0, 0].legend(['x', 'y', 'z'], loc=3, fontsize=6)

    axarr[0, 1].step(np.transpose(timevec), np.transpose(states[3:6, :]))
    axarr[0, 1].set_xlabel('Time, [s]')
    axarr[0, 1].set_ylabel(r'$\dot{\mathbf{r}}(t)$')
Code Example #8
File: plot.py Project: ajmendez/PySurvey
import numpy as np
import pylab

def hist(x,
         bins,
         weight=None,
         weights=None,
         index=None,
         norm=None,
         frac=False,
         total=False,
         dist=False,
         cumulative=False,
         revcumulative=False,
         bottom=None,
         filled=False,
         **kwargs):
    '''
    norm: rescale so the maximum bin value equals `norm`
    frac: plot the per-bin fraction that `index` makes up of the entire sample
    '''
    rotate = kwargs.pop('rotate', False)
    noplot = kwargs.pop('noplot', False)
    if bottom is None: bottom = 0.0
    if index is not None:
        xx = x[index]
        if weight is not None and weights is not None:
            raise ValueError(
                'Only supply one.  This is just so that you can push weight and weights'
            )
        if weight is not None:
            ww = weight[index]
        elif weights is not None:
            ww = weights[index]
        else:
            ww = None
    else:
        xx = x
        ww = weight

    v, l = np.histogram(xx, bins, weights=ww)
    v = v.astype(float)  # cast so the in-place ops below (v += bottom, v /= d) work on modern numpy
    d = np.diff(l)
    l = l[:-1] + d / 2.0

    if frac:
        vv = np.histogram(x, bins, weights=weight)[0]  # weights must be a keyword; the 3rd positional arg is `range`
        ii = np.where(vv == 0)
        v = np.array(v) * 1.0 / np.array(vv)
        v[ii] = 0

    if cumulative:
        v = np.cumsum(v)
    if revcumulative:
        v = np.cumsum(v[::-1])[::-1]

    if norm is not None:
        v = v / float(np.max(v)) * float(norm)
    if total:
        v = v / (1.0 * np.sum(v))

    if dist:
        v /= d

    if bottom is not None:
        v += bottom
    # if rotate:
    #     l,v = v,l
    if not noplot:
        if filled:
            kwargs.setdefault('align', 'center')
            # hack to fix pylab.bar's coloring; note _get_lines.color_cycle is a
            # private attribute of older matplotlib (renamed in later versions)
            if 'color' not in kwargs:
                kwargs['color'] = next(pylab.gca()._get_lines.color_cycle)
            if rotate:
                pylab.barh(l, v - bottom, height=d, left=bottom, **kwargs)
            else:
                pylab.bar(l, v - bottom, width=d, bottom=bottom, **kwargs)
        else:
            if rotate:
                pylab.step(v, l, where='mid', **kwargs)
            else:
                pylab.step(l, v, where='mid', **kwargs)
    # if rotate:
    #     l,v = v,l
    return l, v
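
A hypothetical usage of the wrapper above (data and bin edges invented for illustration):

import numpy as np
data = np.random.randn(10000)
centers, counts = hist(data, bins=np.linspace(-4, 4, 41))
centers, cum = hist(data, bins=np.linspace(-4, 4, 41), cumulative=True, norm=1.0)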
Code Example #9
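# Requires numpy as np, matplotlib.pyplot as plt, pickle, os and
# scipy.optimize.curve_fit, plus project-local names used below: quicklook_im,
# MR (modified-Rician PDF), IratioFile, and the pipe/misc modules.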
def plot_LC_map(xlocs, ylocs, LCmapFile, inspect=False):

    with open(LCmapFile, 'rb') as handle:
        LCmap = pickle.load(handle)

    # print np.shape(LCmap)
    LCmap = LCmap[:, :, :]
    # plt.plot(LCmap[60,40])
    # plt.figure()
    # LCmap = temp.downsample(LCmap, factor=10)
    # plt.plot(LCmap[60,40])
    # plt.show()

    print(np.shape(LCmap))

    # xinspect = range(35,45)
    # yinspect = range(85,95)

    total_map = np.sum(LCmap, axis=2)
    median_map = np.median(LCmap, axis=2)
    interval_map = np.sum(LCmap[:, :, :100], axis=2)

    # if inspect:
    #     plt.imshow(median_map[yinspect[0]:yinspect[-1],xinspect[0]:xinspect[-1]])
    #     plt.show()

    quicklook_im(total_map, logAmp=True, show=False)
    quicklook_im(median_map, logAmp=True, show=False)
    quicklook_im(interval_map, logAmp=True, show=False)

    if os.path.isfile(IratioFile):
        with open(IratioFile, 'rb') as handle:
            Ic, Is, Iratio, mIratio = pickle.load(handle)
        print(np.shape(Iratio))
    else:
        Iratio = np.zeros((len(xlocs), len(ylocs)))
        Ic = np.zeros((len(xlocs), len(ylocs)))
        Is = np.zeros((len(xlocs), len(ylocs)))
        mIratio = np.zeros((len(xlocs), len(ylocs)))
        for ix, xloc in enumerate(xlocs):
            for iy, yloc in enumerate(ylocs):
                if (ix * len(ylocs) + iy) % 100 == 0:
                    misc.progressBar(value=(ix * len(ylocs) + iy),
                                     endvalue=len(xlocs) * len(ylocs))
                ints = LCmap[ix, iy]

                ID = pipe.get_intensity_dist(ints)
                bincent = (ID['binsS'] + np.roll(ID['binsS'], 1)) / 2.
                bincent = np.array(bincent)[1:]

                # popt, _ = curve_fit(gaussian, ID['binsS'][:-1], ID['histS'])
                # plt.plot(ID['binsS'][:-1], gaussian(ID['binsS'][:-1], *popt), 'r--')
                # popt, _ = curve_fit(poisson, ID['binsS'][:-1], ID['histS'])
                # plt.plot(ID['binsS'][:-1], poisson(ID['binsS'][:-1], *popt), 'g--')
                # bincent = np.linspace(0,100000,len(bincent))
                # gauss = gaussian2(bincent, 1000,np.mean(ints)) + 0.0001 * np.random.normal(size=bincent.size)
                # popt, _ = curve_fit(gaussian2, bincent, gauss, p0=[100,100])
                # print popt

                # plt.plot(bincent, gauss)
                # plt.plot(bincent, gaussian2(bincent, *popt), 'g--')
                # print sum(gauss)
                # print np.mean(ints)

                guessIc = np.mean(ints) * 0.7
                guessIs = np.mean(ints) * 0.3

                # popt, _ = curve_fit(MR, bincent, gauss, p0=[guessIc,guessIs])
                # print popt
                # plt.plot(bincent, MR(bincent, *popt), 'g--')
                # print sum(MR(bincent, *popt))

                # popt, _ = curve_fit(func, bincent, ID['histS'])
                # plt.plot(bincent, func(bincent, *popt), 'r--')
                try:
                    popt, _ = curve_fit(MR,
                                        bincent,
                                        ID['histS'],
                                        p0=[guessIc, guessIs])
                    Ic[ix, iy] = popt[0]
                    Is[ix, iy] = popt[1]
                    Iratio[ix, iy] = popt[0] / popt[1]
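                    # Kept as in the source; given the MR mean Ic+Is and variance
                    # Is^2 + 2*Ic*Is, `popt[0] + popt[1]` and
                    # `popt[1]**2 + 2*popt[0]*popt[1]` may have been intended below.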
                    m = (np.sum(ints) - (popt[0] + popt[0])) / (
                        np.sqrt(popt[1]**2 + 2 * popt[0] + popt[1]) *
                        len(ints))
                    mIratio[ix, iy] = m**-1 * (Iratio[ix, iy])
                except RuntimeError:
                    pass
                # print np.shape(ints)
                # EI = np.sum(ints)
                # # print EI
                # EI2 = EI**2
                # # print EI2
                # var = np.var(ints)
                # # print var
                # Is[ix,iy] = EI-np.sqrt(EI2-var)
                # # print Is[ix,iy]
                # Ic[ix,iy] = EI-Is[ix,iy]
                # # print Ic[ix,iy]
                # Iratio[ix,iy] = Ic[ix,iy]/Is[ix,iy]
                # exit()
                if inspect:  # and xloc in yinspect and yloc in xinspect:
                    plt.figure()
                    plt.plot(ints)
                    print(xloc, yloc)
                    plt.figure()
                    plt.step(bincent, ID['histS'])
                    plt.plot(bincent, MR(bincent, *popt), 'b--')
                    print(popt, popt[0] / popt[1])
                    plt.show()

        with open(IratioFile, 'wb') as handle:
            pickle.dump([Ic, Is, Iratio, mIratio],
                        handle,
                        protocol=pickle.HIGHEST_PROTOCOL)

    quicklook_im(Ic, logAmp=True, show=False)  #, vmax=25)#
    quicklook_im(Is, logAmp=True, show=False)  #,vmax=5,)#
    quicklook_im(Iratio, logAmp=True, show=False)  #,vmax=25,)#
    quicklook_im(mIratio, logAmp=True, show=False)  #, vmax=5,)#
    quicklook_im(mIratio * Iratio, logAmp=True, show=False)  #, vmax=500,)#
    plt.show()
    return total_map, median_map, interval_map, Iratio, mIratio
Code Example #11
from os import listdir

import matplotlib.pyplot as plt
import numpy as np

# `path` and `statistical` are project-local modules.

def graph_cdf_changed_overlap_2(infos):
    
    intersectionpp_files = [x for x in listdir(infos["output_dir"])
        if x.find("intersectionspp") != -1]

    intersect_num = 0
    changed_overlap = list()
    
    num_overlapping = 0
    for f in intersectionpp_files:
        print(f)
        for line in open(infos["output_dir"]+"/"+f,"r"):
            infos["node"] = f.split(".",1)[1]

            line_parts = line.split(" ")
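            # Space-separated fields used below: [3] "old;new;intersect" LCZ
            # triplets joined by '#', [7]/[11] the old/new overlapping paths,
            # [13]/[15] the old/new change zones ("empty" when absent).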

            overlap_oldpath = path.path_fromstr(line_parts[7])
            overlap_newpath = path.path_fromstr(line_parts[11])

            lczs_detection_old = [path.hops_fromstr(x.split(";")[0]) \
                    for x in line_parts[3].split("#")]

            lczs_detection_new = [path.hops_fromstr(x.split(";")[1]) \
                    for x in line_parts[3].split("#")]
           
            intersections = [x.split(";")[2] \
                    for x in line_parts[3].split("#") ]

            changes_old_str = line_parts[13]
            changes_new_str = line_parts[15]

            changes_old = []
            changes_new = []
            if changes_old_str != "empty":
                changes_old = [x.strip(",").split(",") for x in \
                    changes_old_str.split("#") if x]
                changes_new = [x.strip(",").split(",") for x in \
                    changes_new_str.split("#") if x]

            statistical.fix_branch_join(overlap_oldpath,overlap_newpath,\
                    changes_old,changes_new)
           
            for j,intersection in enumerate(intersections):
                intersection_hops = path.hops_fromstr(intersection)
                intersection_hops = [x for x in intersection_hops \
                    if x != ["255.255.255.255"]]
               
                if(not intersection_hops):
                    continue

                total_coverage = set()
                for i,change_zone_old in enumerate(changes_old):

                    change_zone_old_hops = path.path_get_subpath(overlap_oldpath,\
                        change_zone_old)
                    coverage_change_zone = statistical.list_intersection(\
                        change_zone_old_hops, intersection_hops)
      
                    if(coverage_change_zone):
                        total_coverage = set([",".join(x) for x in coverage_change_zone]) 
                        break

                set_intersect = set([",".join(x) for x in intersection_hops]) 
                v = float(len(total_coverage)) / len(set_intersect)
                changed_overlap.append(v)
                num_overlapping += 1
                break
    
    uniq_values = list(set(changed_overlap))
    x = uniq_values
    x.sort()
    y = [changed_overlap.count(i)/float(num_overlapping) for i in x]
    cdf = np.cumsum(y)


    plt.step(x,cdf, where="post")

    plt.xlabel("% overlap changed",fontsize=16)
    plt.ylabel("CDF of all overlaps in overlapping routes",fontsize=16)

    plt.ylim(0.0, 1.0)
    plt.savefig('out_cdf_changed_overlap_Y2.pdf')
    plt.clf()
Code Example #12
from collections import defaultdict
from os import listdir

import matplotlib.pyplot as plt
import numpy as np

def graph_generate_cdf_lcz_size(infos):
    
    evaluation_2_files = [x for x in listdir(infos["output_dir"])
        if x.find("evaluation_2") != -1]
    covered_dist = defaultdict(lambda: 0)
    uncovered_dist = defaultdict(lambda: 0)
    all_dist = defaultdict(lambda: 0)
    
    covered_number = 0
    uncovered_number = 0
    all_number = 0

    for f in evaluation_2_files:
        print(f)
        for line in open(infos["output_dir"]+"/"+f,"r"):
            line = line.strip().split(" ")
            if(line[3] == "0,0"):
                continue
            lcz_stats = line[3].split(";")
            lcz_ttls = line[4].split(";")
            for i,stat in enumerate(lcz_stats):
                lcz_size,lcz_intersect = stat.split(",")
                lcz_size = int(lcz_size)
                lcz_intersect = int(lcz_intersect)
                if(lcz_intersect > 0):
                    covered_dist[lcz_size] += 1
                    covered_number += 1
                else:
                    uncovered_dist[lcz_size] += 1
                    uncovered_number += 1
                all_dist[lcz_size] += 1
                all_number += 1
    
    x = sorted(covered_dist)
    y = [covered_dist[i]/float(covered_number) for i in x]
    cdf = np.cumsum(y)

    plt.step(x,cdf,label="covered LCZ", where="post")

    x = sorted(uncovered_dist)
    y = [uncovered_dist[i]/float(uncovered_number) for i in x]
    cdf = np.cumsum(y)
    
    plt.step(x,cdf,label="uncovered LCZ", where="post")
    
    x = sorted(all_dist)
    y = [all_dist[i]/float(all_number) for i in x]
    cdf = np.cumsum(y)
    
    plt.step(x,cdf,linestyle="--",label="all LCZ", where="post")


    plt.xlabel("LCZ Size",fontsize=16)
    plt.ylabel("CDF of LCZ in overlapping changed routes",fontsize=16)

    plt.ylim(0.0, 1.0)
    plt.legend(loc='lower right')
    plt.savefig('out_cdf_lcz_size.pdf')
    plt.clf()
Code Example #13
from os import listdir

import matplotlib.pyplot as plt
import numpy as np

# `path` and `statistical` are project-local modules.

def graph_cdf_lcz_intersect_jaccard(infos):
    
    intersectionpp_files = [x for x in listdir(infos["output_dir"])
        if x.find("intersectionspp") != -1]

    relation = list()
    intersect_null = 0
    jaccard_indexes = list()

    for f in intersectionpp_files:
        print(f)
        for line in open(infos["output_dir"]+"/"+f,"r"):
            infos["node"] = f.split(".",1)[1]

            line_parts = line.split(" ")

            overlap_oldpath = path.path_fromstr(line_parts[7])
            overlap_newpath = path.path_fromstr(line_parts[11])

            lczs_detection_new = [path.hops_fromstr(x.split(";")[1]) \
                    for x in line_parts[3].split("#")]
           
            intersections = [x.split(";")[2] \
                    for x in line_parts[3].split("#")]

            changes_old_str = line_parts[13]
            changes_new_str = line_parts[15]

            changes_old = []
            changes_new = []
            if changes_old_str != "empty":
                changes_old = [x.strip(",").split(",") for x in \
                    changes_old_str.split("#") if x]
                changes_new = [x.strip(",").split(",") for x in \
                    changes_new_str.split("#") if x]
            else:
                continue

            statistical.fix_branch_join(overlap_oldpath,overlap_newpath,\
                    changes_old,changes_new)

            for i,change_zone_old in enumerate(changes_old):
                change_zone_new = changes_new[i]

                change_zone_old_hops = path.path_get_subpath(overlap_oldpath,\
                    change_zone_old)
                change_zone_new_hops = path.path_get_subpath(overlap_newpath,\
                    change_zone_new)

                for j,intersection in enumerate(intersections):

                    intersection_hops = path.hops_fromstr(intersection)
                    coverage_change_zone_old = statistical.list_intersection(\
                        change_zone_old_hops,intersection_hops)
                    # stars does not make intersections
                    coverage_change_zone = [x for x in coverage_change_zone_old \
                        if x != ["255.255.255.255"]]
                    if(coverage_change_zone):
                        
                        set_1 = set([",".join(x) for x in change_zone_new_hops])
                        set_2 = set([",".join(x) for x in lczs_detection_new[j]])
                        inter_size = len(set_1.intersection(set_2))  # renamed to avoid shadowing the loop variable
                        union = len(set_1.union(set_2))
                        jaccard_indexes.append(float(inter_size)/union)
   
    total_values = float(len(jaccard_indexes))
    uniq_values = list(set(jaccard_indexes))
    uniq_values.sort()
    x = uniq_values
    y = [jaccard_indexes.count(i)/total_values for i in x]
    cdf = np.cumsum(y)

    plt.step(x,cdf, where="post")

    plt.xlabel("jaccard index (detection new, overlap new)",fontsize=16)
    plt.ylabel("CDF of all LCZ in overlapping changed routes\nthat intersect "+\
        "detection",fontsize=12)

    plt.ylim(0.0, 1.0)
    plt.xlim(0.0, 1.0)
    plt.savefig('out_lcz_jaccard22.pdf')
    plt.clf()
Code Example #14
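# Fragment of an RFI-flagging script: opts, jd, flg_arr, t_arr, pcnt_f, pcnt_t,
# fqs, chans, args and file2jd() are defined earlier in the original file;
# numpy as np, pylab and sys are imported there.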
if opts.npz is not None: npzname = opts.npz + '.npz'
else: npzname = '%s_%s_%s_RFI.npz' % (jd, opts.ant, opts.pol)

if opts.verb: print('Writing data to %s' % npzname)
np.savez(npzname, grid=flg_arr, dJDs=t_arr, percent_f=pcnt_f, percent_t=pcnt_t)

#If you don't want plots, let's save everyone a smidgen of time and quit now
if not opts.show and not opts.save_wfall and not opts.save_freq: sys.exit()

##Plotting freq occupancy

if opts.save_freq or opts.show:
    if opts.verb: print('Plotting frequency-occupancy plot')
    if not opts.chanaxis:
        pylab.step(fqs, pcnt_f, where='mid')
        pylab.fill_between(fqs, 0, pcnt_f, color='blue', alpha=0.3)
        pylab.xlim(fqs[0], fqs[-1])
        pylab.xlabel('Frequency [MHz]')
    else:
        pylab.step(chans, pcnt_f, where='mid')
        pylab.fill_between(chans, 0, pcnt_f, color='blue', alpha=0.3)
        pylab.xlim(chans[0], chans[-1])
        pylab.xlabel('Channel number')
    pylab.ylabel('Occupancy [%]')
    if len(args) == 1: pylab.suptitle(file2jd(args[0]), size=15)
    else:
        pylab.suptitle('%s - %s' %
                       (file2jd(args[0]), file2jd(args[len(args) - 1])),
                       size=15)
Code Example #15
import matplotlib as mp  # needed for the rcParams calls below
import matplotlib.pylab as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages

# mp.rc('font', family = 'serif', serif = 'cmr10')
mp.rcParams['mathtext.fontset'] = 'cm'
mp.rcParams.update({'font.size': 16})

n=3
α = np.array([0.8,1.0,1.2])
X = np.array([8/15,5/15,2/15])
with PdfPages('water_filling_plot.pdf') as pdf:
	axis = np.arange(0.5,n+1.5,1)
	index = axis+0.5
	# X = np.asarray(x).flatten()
	Y = α + X

	# to include the last data point as a step, we need to repeat it
	A = np.concatenate((α,[α[-1]]))
	X = np.concatenate((X,[X[-1]]))
	Y = np.concatenate((Y,[Y[-1]]))

	plt.xticks(index)
	plt.xlim(0.5,n+0.5)
	plt.ylim(0,1.5)
	plt.step(axis,A,where='post',label =r'$\alpha$',lw=2)
	plt.step(axis,Y,where='post',label=r'$\alpha + x$',lw=2)
	plt.legend(loc='lower right')
	plt.xlabel('channel number')
	plt.ylabel('power level')
	pdf.savefig(bbox_inches='tight')
Code Example #16
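# Requires (among others): numpy as np, pylab (step/text/title/gca/gcf/hist),
# matplotlib.pyplot as plt, matplotlib.ticker as ticker, matplotlib.dates.date2num,
# datetime, time, sys, locale, math.ceil, random.randrange and jira.JIRA, plus
# project-local helpers xstr(), percentage(), format_date() and the Jira
# credentials username/password defined elsewhere in the original module.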
def montecarlorisk(num_trials, annual_escalation, subsystem, output_file):
    ## define output location; if variable output_file is true then output goes to test.txt in working directory
    fhold = sys.stdout
    if output_file:
        f = open('./test.txt', 'w')
        sys.stdout = f

    #########################################################################################
    ###################### Some basic values ###############################
    #########################################################################################
    total_contingency = 230694.0  # total contingency in K EUR  TBD !!!!
    nyears = 8  ## number of years with construction activity
    date_start = "2021-07-01"
    date_end = "2029-07-31"
    date_commissioning_start = "2029-08-01"
    date_base_year = "2021"
    date_year_start = "2021"
    date_year_end = "2029"
    annual_esc = 1.0 + annual_escalation  # convert annual fractional escalation to factor
    yer = [
        '2021', '2022', '2023', '2024', '2025', '2026', '2027', '2028', '2029'
    ]

    final_totals_distribution = []
    #cost_lowest = np.zeros(1000)
    #cost_expected = np.zeros(1000)
    #cost_highest = np.zeros(1000)

    subsystem = subsystem.upper()
    if subsystem == 'ALL':
        fundingstring = " "
        projectname = "SKA"
    elif subsystem == 'MID':
        fundingstring = " AND component = 'MID' "
        projectname = 'MID'
    elif subsystem == 'LOW':
        fundingstring = " AND component = 'LOW' "
        projectname = 'LOW'
    elif subsystem == 'OCS':
        fundingstring = " AND component = 'OCS' "
        projectname = 'OCS'
    elif subsystem == 'PM':
        fundingstring = " AND component = 'PM' "
        projectname = 'PM'

    ##############################################################################
    ################### Simple escalation model
    ##############################################################################
    escalate = {}  # a dictionary: year string -> cumulative escalation factor
    esc_sum = 0.0  # renamed from `sum` to avoid shadowing the builtin
    escalate[date_base_year] = 1.0
    for jj in range(nyears):
        escalate[yer[jj + 1]] = escalate[yer[jj]] * annual_esc
        esc_sum += escalate[yer[jj + 1]]
    escalate['dist_sum'] = esc_sum / nyears

    server = "https://jira.skatelescope.org"
    auth_inf = (username, password)
    try:
        jira = JIRA(server=server, basic_auth=auth_inf)
    except Exception:  # was a bare except
        print(
            "ERROR: Jira authentication failed. Have you provided the correct username and password?"
        )
        return

    # AND (cf[12916] is EMPTY OR cf[12916] ='False')

    query = "project=RM AND issuetype='RM-Risk' AND status='Active Risk/Opportunity' " + fundingstring + "ORDER BY cf[12933]"
    fields = "components,summary,customfield_12926,customfield_12901,customfield_12905,customfield_12915,customfield_12933,customfield_12936,customfield_12938,description"
    print(('\n\r Query to database \n\r\n\r' + query + '\n\r'))
    issues = jira.search_issues(query, maxResults=None, fields=fields)
    nrisks = len(issues)
    rows = []

    mean_prob_lookup = {
        '2%': 0.02,
        '5%': 0.05,
        '10%': 0.1,
        '25%': 0.25,
        '50%': 0.5,
        '80%': 0.8
    }
    rows = []
    for i in range(len(issues)):
        rows.append({
            'riskid':
            int(''.join([i for i in issues[i].key if i.isdigit()])),
            'projectsystem':
            xstr(issues[i].fields.components[0].name),
            'current_probability':
            xstr(issues[i].fields.customfield_12926),  #map from 13200
            'current_expense_expected':
            (float(issues[i].fields.customfield_12901)
             if issues[i].fields.customfield_12901 else 0.0),  #map from 13404
            'current_schedule_cost_expected':
            (float(issues[i].fields.customfield_12905)
             if issues[i].fields.customfield_12905 else 0.0),  #map from 13606
            'meanprobability':
            mean_prob_lookup[
                issues[i].fields.customfield_12926.value],  #map from 13200
            'total_cost':
            0.0,
            'obligationmodel':
            xstr(issues[i].fields.customfield_12915),  #map from 13107
            'triggerdate': (datetime.datetime.strptime(
                issues[i].fields.customfield_12933, '%Y-%m-%d').date()
                            if issues[i].fields.customfield_12933 else
                            datetime.date(2000, 1, 1)),  #map from 13108
            'randomtrigger':
            (int(issues[i].fields.customfield_12938)
             if issues[i].fields.customfield_12938 else 0),  #map from 13110
            'risktitle':
            xstr(issues[i].fields.summary),
            'riskdescription':
            xstr(issues[i].fields.description),
            'randomperiod':
            xstr(issues[i].fields.customfield_12936)
        })  # map from 13111

    # setup lists (`nyears` is re-bound here from the scalar above to a per-risk list)
    nyears = [1 for i in range(nrisks)]
    riskheader = ['     ' for i in range(20000)]
    riskid = []  # issue.key
    projectsystem = []  # issue.fields.components
    current_probability = []  # issue.fields.customfield_12926
    current_expense_expected = []  # issue.fields.customfield_12901
    current_schedule_cost_expected = []  # issue.fields.customfield_12905
    meanprobability = []  # calculate from cf 12926
    total_cost = []  # issue.fields.customfield_12905 + issue.customfield_12901
    obligationmodel = []  # issue.fields.customfield_12915
    triggerdate = []  # issue.fields.customfield_12933
    randomtrigger = [
    ]  # issue.fields.customfield_12936 and issue.customfield_12938
    risktitle = []  # issue.fields.summary
    riskdescription = []  # issue.fields.description
    randomperiod = []

    ## Rule 0  - Accept all risks, simple passthrough
    ##    print "\n\r Rule 1 - Accept only risks that have total cost of more than €1M \n\r"
    ##        print "\n\r Rule 2 - Accept only risks that have expected exposure of more that €200K \n\r"
    ##        print "\n\r Rule 3 - Accept risks that pass Rule 1 OR Rule 2 \n\r"
    ## Store the database values into arrays

    print('\n\r Summary of risks ordered by triggerdate \n\r\n\r')
    for ii in range(nrisks):
        lasttotalcost = (float(rows[ii]['current_expense_expected']) +
                         float(rows[ii]['current_schedule_cost_expected']))

        ##############################################################################
        ################### Use simple model of escalation to convert to as-spent dollars
        ##############################################################################

        if rows[ii]['obligationmodel'] == "trigger":
            yr = rows[ii]['triggerdate'].year
            yr = max(int(date_year_start), int(yr))
            yr = min(int(date_year_end), int(yr))
            lasttotalcost = lasttotalcost * escalate[str(yr)]
        else:
            lasttotalcost = lasttotalcost * escalate['dist_sum']

        ##############################################################################

        if lasttotalcost >= 0.00:
            ## print("\n\r Rule 0  - Accept all risks, simple passthrough \n\r")
            ## Rule 1 - Accept only risks that have total cost of more than €1M
            ##        if lasttotalcost >= 1000.00:
            ## Rule 2 - Accept only risks that have expected exposure of more that €200K
            ##        if float(rows[ii]['meanprobability'])*lasttotalcost >= 200.0:
            ## Rule 3 - Accept risks that pass Rule 1 OR Rule 2
            ##       if float(rows[ii]['meanprobability'])*lasttotalcost >= 200.0 or lasttotalcost >= 1000.00:
            riskid.append(rows[ii]['riskid'])
            projectsystem.append(rows[ii]['projectsystem'])
            current_probability.append(rows[ii]['current_probability'])
            current_expense_expected.append(
                rows[ii]['current_expense_expected'])
            current_schedule_cost_expected.append(
                rows[ii]['current_schedule_cost_expected'])
            meanprobability.append(float(rows[ii]['meanprobability']))
            obligationmodel.append(rows[ii]['obligationmodel'])
            triggerdate.append(rows[ii]['triggerdate'])
            randomtrigger.append(rows[ii]['randomtrigger'])
            risktitle.append(rows[ii]['risktitle'])
            riskdescription.append(rows[ii]['riskdescription'])
            total_cost.append(lasttotalcost)
            randomperiod.append(rows[ii]['randomperiod'])

            ## Print formatted output
            print(
                '{:>30} RM-{:4} {:>10}  {:>22} {:>5} [{:>8.2f} {:>8.2f}] {:>8.2f}   {:40} {:80}'
                .format(
                    rows[ii]['projectsystem'],
                    str(rows[ii]['riskid']),
                    str(rows[ii]['triggerdate']),
                    #rows[ii]['obligationmodel'][0:4],
                    rows[ii]['obligationmodel'],
                    #rows[ii]['randomtrigger'] % 1000,
                    rows[ii]['randomtrigger'],
                    lasttotalcost,
                    rows[ii]['meanprobability'],
                    float(rows[ii]['meanprobability']) * lasttotalcost,
                    str(rows[ii]['risktitle']),
                    str(rows[ii]['riskdescription']),
                ))
        nrisks = len(riskid)
    ##   Print risks ordered by riskid
    print(('\n\r Summary of {:>3} risks ordered by riskid \n\r\n\r'.format(
        str(nrisks))))
    hold_riskid, hold_projectsystem, hold_risktitle = (list(t) for t in zip(
        *sorted(zip(riskid, projectsystem, risktitle))))
    for ii in range(nrisks):
        print('{:>30} RM-{:3}   {:40}'.format(hold_projectsystem[ii],
                                              str(hold_riskid[ii]),
                                              hold_risktitle[ii]))

    ## Print risk description ordered by totalcost
    print(('\n\r Summary of {:>3} risks ordered by totalcost \n\r\n\r'.format(
        str(nrisks))))
    hold_total_cost, hold_riskdescription, hold_projectsystem, hold_riskid, hold_meanprobability = (
        list(t)
        for t in zip(*sorted(zip(total_cost, riskdescription, projectsystem,
                                 riskid, meanprobability),
                             reverse=True)))

    for ii in range(nrisks):
        print('{:>30} RM-{:3} €{:8,.7}K [{:<4}]   {:<100}'.format(
            hold_projectsystem[ii], str(hold_riskid[ii]), hold_total_cost[ii],
            hold_meanprobability[ii], hold_riskdescription[ii]))

    ## Figure 4
    ##  Interaction loop over risks. Also, plot fig 4 with the risk spend curve
    max_hold = 0.0
    fig4 = plt.figure(4)
    ax1 = fig4.add_subplot(111)
    ###################################################################
    ############ Begin main Monte Carlo iteration loop ################
    ###################################################################
    for ii in range(num_trials):
        delta_this_iteration = []
        triggerdate_this_iteration = []
        projectsystem_this_iteration = []
        riskid_this_iteration = []
        ###################################################################
        ############ Random loop over each risk ################
        ###################################################################
        ##
        ##  Each risk has a specified date of possible occurence.  A risk can occur at a specified trigger date;
        #   at some random time; or a risk may occur more than once over a specified range of dates.
        ## Trigger case
        for jj in range(nrisks):
            if obligationmodel[jj] == "Trigger date":
                choice = np.random.uniform(0.0, 1.0, 1)
                if choice <= meanprobability[jj]:
                    addit = float(total_cost[jj])
                else:
                    addit = float(0.0)
                delta_this_iteration.append(addit)
                triggerdate_this_iteration.append(triggerdate[jj])
                projectsystem_this_iteration.append(projectsystem[jj])
                riskid_this_iteration.append(int(riskid[jj]))
            ## Random case
            elif obligationmodel[jj] == "Random occurrence(s)":
                nrandom = randomtrigger[jj]
                #print("random risk; nrandom = "+str(nrandom))
                #periodcode = randomtrigger[jj] / 1000
                #print("random risk periodcode = "+str(periodcode))
                periodcode = 3

                if randomperiod[jj] == 'Construction only':
                    periodcode = 1
                elif randomperiod[jj] == 'Commissioning only':
                    periodcode = 2
                elif randomperiod[jj] == 'Both Construction and Commissioning':
                    periodcode = 3
                date1 = date_start
                date2 = date_commissioning_start
                if periodcode == 1:  # random during construction only
                    date1 = date_start
                    date2 = date_commissioning_start
                elif periodcode == 2:  # random during commissioning only
                    date1 = date_commissioning_start
                    date2 = date_end
                elif periodcode == 3:  # random throughout project
                    date1 = date_start
                    date2 = date_end
                for kk in range(nrandom):
                    stime = time.mktime(time.strptime(date1, '%Y-%m-%d'))
                    etime = time.mktime(time.strptime(date2, '%Y-%m-%d'))
                    ptime = stime + np.random.uniform(0.0, etime - stime)  # np.random.uniform takes (low, high); a single argument is low, not high
                    randomdate = datetime.date.fromtimestamp(int(ptime))
                    #print(randomdate)
                    choice = np.random.uniform(0.0, 1.0)
                    if choice <= meanprobability[jj]:
                        addit = float(total_cost[jj]) / float(nrandom)
                    else:
                        addit = float(0.0)
                    delta_this_iteration.append(addit)
                    triggerdate_this_iteration.append(randomdate)
                    projectsystem_this_iteration.append(projectsystem[jj])
                    riskid_this_iteration.append(int(riskid[jj]))
            ## Distributed case
            elif obligationmodel[jj] == "Distributed occurrence":
                if ii == 0:  # only on first pass through will triggerdate always have the proper value
                    #print ii,jj,triggerdate[jj],triggerdate[jj].year
                    ny = max(
                        triggerdate[jj].year - 2021, 1
                    )  # risk is distributed over this many years but must be at least 1
                    nyears[jj] = min(
                        ny, 8
                    )  # must store the corect values of nyears for each distributed risk
                for kk in range(nyears[jj]):
                    year = 2022 + kk  #kk starts at zero.  Don't include short period in 2021
                    choice = np.random.uniform(0.0, 1.0, 1)
                    if choice <= meanprobability[jj]:
                        addit = float(total_cost[jj]) / float(nyears[jj])
                    else:
                        addit = float(0.0)
                    delta_this_iteration.append(addit)
                    triggerdate_this_iteration.append(
                        datetime.date(year, randrange(1, 13), 1)
                    )  # random month in the year (randrange's upper bound is exclusive), always the first day of the month
                    projectsystem_this_iteration.append(projectsystem[jj])
                    riskid_this_iteration.append(int(riskid[jj]))
            else:
                sys.exit(" obligationmode not defined for risk " +
                         str(projectsystem[jj]) + str(riskid[jj]) + "  " +
                         str(jj))
        ###################################################################
        ############    End short random loop over risk    ################
        ###################################################################
        # Since random and distributed risks have been added the lists are no longer in date order.
        # Need to resort the two arrays by effective trigger dates using: list1, list2 = (list(t) for t in zip(*sorted(zip(list1, list2))))  - YIKES
        #print(riskid_this_iteration)
        triggerdate_this_iteration, delta_this_iteration, projectsystem_this_iteration, riskid_this_iteration = (
            list(t) for t in zip(*sorted(
                zip(triggerdate_this_iteration, delta_this_iteration,
                    projectsystem_this_iteration, riskid_this_iteration))))
        #print(type(riskid_this_iteration),riskid_this_iteration)
        #print(" ")
        #print(delta_this_iteration)
        # Compute the running sum
        xx_this_iteration = np.cumsum(delta_this_iteration)
        len_xx = len(xx_this_iteration)
        ###################################################################
        ############# Some diagnostic output  #############################
        ###################################################################
        nprintout = 5  # number of simulations with diagnostic output
        diagnostic_steps = max(1, num_trials // nprintout)  # integer division avoids a float modulus
        if ii % diagnostic_steps == 0:
            print(('\n\r\n\r\n\r Diagnostic output for iteration ' + str(ii) +
                   ' \n\r'))
            for mm in range(len_xx):
                header = riskheader[riskid_this_iteration[mm]]
                line = [
                    header, projectsystem_this_iteration[mm],
                    riskid_this_iteration[mm],
                    str(triggerdate_this_iteration[mm]),
                    delta_this_iteration[mm], xx_this_iteration[mm]
                ]
                print('{:>6}{:>30} RM-{:3} {:>15} {:12.1f} {:12.1f}'.format(
                    *line))
                #print(line)
        # Store the grand totals
        # reserve the storage arrays on the first iteration
        if ii == 0:
            totals = np.zeros(len_xx)
            totals2 = np.zeros(len_xx)
            #print len(xx),len_xx,len(totals),len(totals2)
        totals += xx_this_iteration
        totals2 += xx_this_iteration * xx_this_iteration
        final_totals_distribution.append(xx_this_iteration[len_xx - 1] *
                                         0.001)  # Convert from K€ to M€
        ## The step method plots the spend curve, plot only every 50th iteration line
        if ii % 50 == 0:
            #print len(triggerdate),len(xx)
            #print(triggerdate)
            #print(" ")
            #print(xx)
            pylab.step(triggerdate_this_iteration,
                       total_contingency - xx_this_iteration,
                       where='post')  # plot the spend curve using step
            max_hold = max([max_hold, max(xx_this_iteration)])
        gca().xaxis.set_major_formatter(ticker.FuncFormatter(format_date))

    ###################################################################
    ###########    End Monte Carlo iteration loop       ###############
    ###################################################################
    ## Spend curve plot labeling
    dd1 = date2num(datetime.datetime.strptime('2021-07-01', "%Y-%m-%d").date())
    dd2 = date2num(datetime.datetime.strptime('2029-07-31', "%Y-%m-%d").date())
    yyy = 5.0 * ceil(total_contingency / 5.0)
    ax1.set_ylim(0.0, yyy)
    ax1.set_xlim(dd1, dd2)
    gcf().autofmt_xdate()
    # Plot some extra bold lines in the spend curve plot
    mean = totals / num_trials
    variance = totals2 / num_trials - mean * mean
    sigma = np.sqrt(variance)

    ax1.plot(triggerdate_this_iteration,
             total_contingency - mean,
             linewidth=5.0,
             color='blue')
    ax1.plot(triggerdate_this_iteration,
             total_contingency - mean + sigma,
             linewidth=5.0,
             color='black')
    ax1.plot(triggerdate_this_iteration,
             total_contingency - mean - sigma,
             linewidth=5.0,
             color='black')
    #  Print tabular data
    #print "length of triggerdate",len(triggerdate_this_iteration),type(triggerdate_this_iteration)
    #print " mean ",len( mean ),type(mean)
    #print "length of  sigma",len( sigma),type(sigma)
    for mm in range(len(triggerdate_this_iteration)):
        line = [
            str(triggerdate_this_iteration[mm]), total_contingency - mean[mm],
            total_contingency - mean[mm] - sigma[mm],
            total_contingency - mean[mm] + sigma[mm]
        ]
        print('{:>15} ,  {:12.1f},  {:12.1f},   {:12.1f}'.format(*line))
    #  Plot the contingency funding curve in as spent EUR
    if subsystem == 'NSF':
        fundingdates = [
            datetime.date(2014, 0o7, 0o1),
            datetime.date(2014, 10, 0o1),
            datetime.date(2015, 10, 0o1),
            datetime.date(2016, 10, 0o1),
            datetime.date(2017, 10, 0o1),
            datetime.date(2018, 10, 0o1),
            datetime.date(2019, 10, 0o1),
            datetime.date(2020, 10, 0o1),
            datetime.date(2021, 10, 0o1)
        ]
        fundinglevels = [
            2600., 13100., 23600., 34100, 44600., 55100., 65600., 76100.
        ]
        print(fundingdates)
        print(fundinglevels)
    #        pylab.step(fundingdates,fundinglevels,linewidth=5.0,color='red',where='post')
    ##    ax1.set_ylim([0.0,80.])
    pylab.title('%s  Contingency spend curve in as-spent K-EUR' % projectname)
    ax1.set_xlabel('Date')
    ax1.set_ylabel('Contingency Balance (as-spent K€)')
    ###################################################################
    ###########    End of spend curve plot      ###############
    ###################################################################
    #     Total probability weighted cost
    weightedcost = 0.0
    for kk in range(nrisks):
        weightedcost += total_cost[kk] * meanprobability[kk]
    weightedcost = locale.currency(weightedcost * 0.001,
                                   grouping=True)  # convert to M€
    ##   weightedcost = weightedcost*.001
    #     Expected cost of risks from Monte Carlo
    expectedcost = locale.currency(mean[len_xx - 1], grouping=True)
    ##    expectedcost = mean[len_xx-1]
    #     Standard deviation of costs from Monte Carlo
    #deviationcost = sigma[nrisks-1]
    #     50,70,80,90,99% confidence level; output is formatted string
    hold50 = percentage(final_totals_distribution, 0.5)
    cellbound50 = locale.currency(hold50, grouping=True)
    #    cellbound50 = hold50
    hold70 = percentage(final_totals_distribution, 0.7)
    cellbound70 = locale.currency(hold70, grouping=True)
    #    cellbound70 = hold70
    hold80 = percentage(final_totals_distribution, 0.8)
    cellbound80 = locale.currency(hold80, grouping=True)
    #    cellbound80 = hold80
    hold90 = percentage(final_totals_distribution, 0.9)
    cellbound90 = locale.currency(hold90, grouping=True)
    #    cellbound90 = hold90
    hold99 = percentage(final_totals_distribution, 0.99)
    cellbound99 = locale.currency(hold99, grouping=True)
    #    cellbound99 = hold99
    #  Write the output
    print("\n\r Total number of iterations %d  " % num_trials)
    print("\n\r Total number of risks %d  " % nrisks)
    print("\n\r Probability weighted total cost of risks: " +
          str(weightedcost) + "M")
    print("\n\r Cost at 50 percent confidence level: " + str(cellbound50) +
          "M")
    print("\n\r Cost at 70 percent confidence level: " + str(cellbound70) +
          "M")
    print("\n\r Cost at 80 percent confidence level: " + str(cellbound80) +
          "M")
    print("\n\r Cost at 90 percent confidence level: " + str(cellbound90) +
          "M")
    print("\n\r Cost at 99 percent confidence level: " + str(cellbound99) +
          "M")
    ## Prepare the data for plotting all plots except the spend curve (Figures 1, 2, and 3)
    final_totals_distribution.sort(
    )  # sorts input from lowest to highest value
    num_trials100 = num_trials / 100.
    niter = list(range(num_trials))
    niter2 = [float(i) / num_trials for i in niter]
    niter3 = [100. - float(i) / num_trials100 for i in niter]
    ylim = 1000.0
    if (num_trials > 10000):
        ylim = 1500.
    elif (num_trials <= 1000):
        ylim = 500.
    ##
    #######################################################################3
    #  Plotting package below for everything except spend curve
    #######################################################################3
    ##                                                                            Figure 1
    ##
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    ax.hist(final_totals_distribution, bins=30)
    ax.set_ylim([0.0, ylim])
    xlim = 20. * (int(max(final_totals_distribution) / 20.) + 1)
    ax.set_xlim([0.0, xlim])
    pylab.title('%s Risk Monte Carlo' % projectname)
    ax.set_xlabel('Total Cost as-spent €M')
    ax.set_ylabel('Number of occurrences')
    ax.grid(True)
    textstring = "Number of iterations: %d " % num_trials
    textstring0 = "Number of risks: %d " % nrisks
    textstring1 = "Prob weighted risk exposure: " + str(weightedcost) + "M"
    textstring2 = "Cost at 50% confidence: " + str(cellbound50) + "M"
    textstring3 = "Cost at 80% confidence: " + str(cellbound80) + "M"
    pylab.text(.1, .85, textstring, transform=ax.transAxes)
    pylab.text(.1, .80, textstring0, transform=ax.transAxes)
    pylab.text(.1, .75, textstring1, transform=ax.transAxes)
    pylab.text(.1, .70, textstring2, transform=ax.transAxes)
    pylab.text(.1, .65, textstring3, transform=ax.transAxes)
    ax2 = ax.twinx()
    ax2.set_ylabel('Cumulative fraction of occurrences', color='r')
    ax2.plot(final_totals_distribution, niter2, c='r')
    ax2.set_ylim([0.0, 1.0])
    # draw an arrow
    arga = {'color': 'r'}
    ax2.arrow(hold50,
              .50,
              10.,
              .00,
              shape='full',
              lw=2,
              head_length=3,
              head_width=.03,
              **arga)
    ##
    ##                                                                            Figure 2
    ##
    fig = plt.figure(2)
    ax = fig.add_subplot(111)
    pylab.title('%s Risk Monte Carlo' % projectname)
    ax.set_xlabel('Total Cost as-spent €M')
    ax.set_ylabel('Percent Probability{Cost > x }')
    ax.grid(True)
    #
    #Xbackground = [[.6, .6],[.5,.5]]
    # plot the probability line
    ax.plot(final_totals_distribution, niter3)
    ax.set_xlim([0.0, xlim])
    ax.set_ylim([0.0, 100.0])
    # draw the background
    ##    ax.imshow(Xbackground, interpolation='bicubic', cmap=cm.copper,
    ##         extent=(40.0,xlim, 0.0, 100.), alpha=.5)  # alpha --> transparency
    # resample the x-axis
    xx = []
    yy = []
    nsteps = 110
    delx = xlim / (nsteps - 10)
    for ii in range(nsteps):
        xx.append(ii * delx)
    yy = np.interp(xx, final_totals_distribution, niter3)
    for jj in range(0, nsteps - 5, 3):
        x1 = xx[jj - 1]
        x2 = xx[jj + 1]
        y2 = yy[jj]
        ##         mybar(ax,x1,x2,y2)
        ax.bar(xx[jj], yy[jj], align='center', color='r')
    # draw a few arrows and vertical lines
    ax.arrow(hold50 + 10,
             50,
             -10.,
             .0,
             shape='full',
             lw=3,
             length_includes_head=True,
             head_width=2)
    ax.vlines(hold50, 0.0, 50, linewidth=4)
    ax.arrow(hold80 + 10,
             20,
             -10.,
             .0,
             shape='full',
             lw=3,
             length_includes_head=True,
             head_width=2)
    ax.vlines(hold80, 0.0, 20, linewidth=4)
    pylab.text(hold50 + 1, 52, textstring2)  # 50% value
    pylab.text(hold80 + 1, 22, textstring3)  # 80% value
    ax.set_aspect('auto')
    ##
    ##                                                                            Figure 3 subplot 1
    ##
    fig, axes = plt.subplots(nrows=2, ncols=1)
    fig.subplots_adjust(hspace=.75)
    ##    fig.tight_layout()
    ax3 = fig.add_subplot(211)
    pylab.title('Histogram of %s risk costs (as-spent EUR)' % projectname)
    ax3.set_xlabel('Cost as-spent €K')
    ax3.set_ylabel('Number of risks')
    ##    yy = hist(total_cost,bins=20)
    ##    ax3.set_xlim(0.0,yy[1].max())
    ##    ax3.set_ylim(0.0,yy[0].max())
    ax3.autoscale(enable=True, axis='both', tight=None)
    labels = ax3.get_xticklabels()
    for label in labels:
        label.set_rotation(45)
    ax3.hist(total_cost, bins=20)  # was `ax3.plot = hist(...)`, which clobbered the Axes.plot method
    ##
    ##                                                                            Figure 3 subplot 2
    ##
    ax4 = axes[1]  # was fig.add_subplot(212); reuse the axes created above
    ax4.autoscale(enable=True, axis='both', tight=None)
    ax4.set_title('Histogram of %s prob-wght\'ed as-spent risk costs' %
                  projectname)
    ax4.set_xlabel('Cost €K')
    ax4.set_ylabel('Number of risks')
    temp = [total_cost[ii] * meanprobability[ii] for ii in range(nrisks)]
    labels = ax4.get_xticklabels()
    for label in labels:
        label.set_rotation(45)
    ax4.hist(temp, bins=20)  # was "ax4.plot = hist(...)", same bug as above
    plt.show()
    sys.stdout = fhold
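Code example #17
0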
                                                    x_T[k], k, dt, a_max,
                                                    v_max)

    # Lock the position setpoint if the error is bigger than some value
    drone_position = 0.0
    x_err = x_T_new - drone_position
    if abs(x_err) > x_err_max:
        x_T[k + 1] = x_T[k]
    else:
        x_T[k + 1] = x_T_new

T123 = 5.0
T1 = computeT1_T123(T123,
                    accel_prev=0.0,
                    vel_prev=0.0,
                    vel_setpoint=2.0,
                    max_jerk=10.0)
T3 = compute_T3(T1, 0.0, 10.0)
T2 = compute_T2_T123(T123, T1, T3)
print("T1 = {}\tT2 = {}\tT3 = {}\n".format(T1, T2, T3))
# Plot trajectory and desired setpoint
plt.step(t, v_d)
plt.step(t, j_T)
plt.step(t, a_T)
plt.step(t, v_T)
plt.step(t, x_T)
plt.legend(["v_d", "j_T", "a_T", "v_T", "x_T"])
plt.xlabel("time (s)")
plt.ylabel("metric amplitude")
plt.show()
Code example #18
0
import numpy as np

# if available import pylab (from matplotlib)
try:
    import matplotlib.pylab as plt
except ImportError:
    pass

# tf/c2d/step are assumed to come from python-control's MATLAB compatibility
# layer; the original machine-converted code called them as bare names
from control.matlab import tf, c2d, step

syst_fake = tf(np.array([1.]), np.array([1., 2., 3.]))
syst_fake_dis = c2d(syst_fake, 0.01)
output, t = step(syst_fake_dis)  # step response of the discretized system
plt.plot(t, output)
out_len = len(output)  # was the MATLAB-ism length(output)
u = np.ones(out_len)  # constant unit input (renamed from "input", a builtin)
num, den = stmcb_test(output, u, 0., 2.)  # stmcb_test: local port of MATLAB's stmcb
#% sys_model=tf(num,den,0.01)
#% step(sys_model)
#% hold on
#% step(syst_fake)
Code example #19
0
              bottom=0.13,
              wspace=0.02,
              hspace=0.075)

    ax = []

    ax.append(plt.subplot(gs[0, 0]))
    ax.append(plt.subplot(gs[1, 0]))
    ax.append(plt.subplot(gs[0, 1]))
    ax.append(plt.subplot(gs[1, 1]))

    # Upper left
    plt.sca(ax[0])
    n, b = np.histogram(ejedep, 201, [-0.005, 2.005])
    x = b[:-1] + 0.5 * np.diff(b)
    plt.step(x, n, where='mid', c=colors.black)
    #    plt.xlabel(r'EJ299 Energy [keV$_\mathrm{ee}$]')
    plt.ylabel(r'Counts / 10 keV$_\mathrm{ee}$', labelpad=-3)
    plt.yscale('log', nonpositive='clip')  # 'nonposy' was renamed to 'nonpositive'
    plt.tick_params(axis='x', labelbottom=False)  # 'off' strings are no longer accepted
    plt.xlim(0, 1.750)
    plt.xticks([0, 0.500, 1.000, 1.500])
    plt.ylim(5, 5e3)

    # Lower left
    plt.sca(ax[1])
    x = ejedep
    y = np.sum(csiedep, axis=1)
    plt.scatter(x, y, marker='+', alpha=0.2, color=colors.black)
    plt.axvline(ej_en_th / 1000.0, color=colors.red, linestyle='dashed')
    plt.xlim(0, 1.750)
Code example #20
0
File: test.py Project: noahdesu/dbqos
import matplotlib.pylab as plt
# statsmodels replaced the old scikits.statsmodels namespace
from statsmodels.distributions.empirical_distribution import ECDF
import numpy as np

sample = np.random.uniform(-1000, 1000, 5000)
ecdf = ECDF(sample)

x = np.linspace(min(sample), max(sample))
y = ecdf(x)
plt.step(x, y, where='post')  # an ECDF is right-continuous
plt.show()

#a = array([...]) # your array of numbers
#a = numpy.random.randint(-1000, 1000, size=2000)
#num_bins = 20
#counts, bin_edges = numpy.histogram(a, bins=num_bins, normed=True)
#print counts, counts.sum()
#cdf = numpy.cumsum(counts)
#plt.plot(bin_edges[1:], cdf)
#plt.show()
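
# A runnable version of the commented histogram-based CDF sketch above
# (a minimal sketch; the sample and the bin count are illustrative assumptions):
a = np.random.randint(-1000, 1000, size=2000)
counts, bin_edges = np.histogram(a, bins=20, density=True)  # density=True replaces the removed normed=True
cdf = np.cumsum(counts * np.diff(bin_edges))  # integrate the density so the CDF ends at 1.0
plt.plot(bin_edges[1:], cdf)
plt.show()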
Code example #21
0
                    alpha=.5,
                    color='orange',
                    step='pre',
                    label="95% boostrap CI")
plt.suptitle('Estimated relative risks with 95% confidence bands')
axarr[0][1].legend(loc='best')
[ax[0].set_ylabel('Relative incidence') for ax in axarr]
[ax.set_xlabel('Time after exposure start') for ax in axarr[-1]]
if remove_last_plot:
    fig.delaxes(axarr[-1][-1])
plt.show()

normalize = lambda x: x / np.sum(x)
m = np.repeat(np.hstack(refitted_coeffs[-6:]), 125)
lb = np.repeat(np.hstack(lower_bound[-6:]), 125)
ub = np.repeat(np.hstack(upper_bound[-6:]), 125)
plt.figure()
plt.plot(np.arange(n_intervals),
         normalize(np.exp(time_drift(np.arange(n_intervals)))))
plt.step(np.arange(n_intervals), normalize(np.exp(m)))
plt.fill_between(np.arange(n_intervals),
                 np.exp(lb) / np.exp(m).sum(),
                 np.exp(ub) / np.exp(m).sum(),
                 alpha=.5,
                 color='orange',
                 step='pre')
plt.xlabel('Age')
plt.ylabel('Normalized Age Relative Incidence')
plt.title("Normalized age effect with 95% confidence bands")
plt.show()
Code example #22
0
import matplotlib.pylab as plt
import numpy as np

def my_lines(ax, pos, *args, **kwargs):
    if ax == 'x':
        for p in pos:
            plt.axvline(p, *args, **kwargs)
    else:
        for p in pos:
            plt.axhline(p, *args, **kwargs)

bits = [0,1,0,1,0,0,1,1,1,0,0,1,0]
data = np.repeat(bits, 2)
clock = 1 - np.arange(len(data)) % 2
manchester = 1 - np.logical_xor(clock, data)
t = 0.5 * np.arange(len(data))

# plt.hold(True)  # hold() was removed from matplotlib; repeated plot calls now overlay by default
my_lines('x', range(13), color='.5', linewidth=2)
my_lines('y', [0.5, 2, 4], color='.5', linewidth=2)
plt.step(t, data + 2, 'r', linewidth=2, where='post')
plt.ylim([-1,6])
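
# clock and manchester are computed above but never drawn in this excerpt; the
# ylim of (-1, 6) and the guide lines at y = 0.5, 2 and 4 suggest the original
# also stepped them into their own bands, e.g. (an assumption, not from the source):
plt.step(t, clock + 4, 'r', linewidth=2, where='post')
plt.step(t, manchester, 'r', linewidth=2, where='post')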

for tbit, bit in enumerate(bits):
    plt.text(tbit + 0.5, 1.5, str(bit))

plt.gca().axis('off')
plt.show()
Code example #23
0
    xhatKalman = kalmanFilter(x, omega, y)
    xhatExtended = extendedKalmanFilter(x, omega, y)

    x = xnew
    omegavec[0:4, ii:ii + 1] = omega
    states[0:12, ii:ii + 1] = x
    measurements[0:7, ii:ii + 1] = y
    estimatesKalman[0:12, ii:ii + 1] = xhatKalman
    estimatesExtended[0:12, ii:ii + 1] = xhatExtended

# Visualize simulation

if 1:  # States
    plt.figure(1)
    plt.plot(np.transpose(timevec), np.transpose(estimatesKalman[2, :]))
    plt.plot(np.transpose(timevec), np.transpose(estimatesExtended[2, :]))
    plt.step(np.transpose(timevec), np.transpose(states[2, :]))
    plt.xlabel('Time, [s]')
    plt.ylabel(r'$\mathbf{r}(t)$')
    plt.legend(['x', 'y', 'z'], loc=1, fontsize=6)

    plt.figure(2)
    plt.plot(np.transpose(timevec), np.transpose(estimatesKalman[6, :]))
    plt.plot(np.transpose(timevec), np.transpose(estimatesExtended[6, :]))
    plt.step(np.transpose(timevec), np.transpose(states[6, :]))
    plt.xlabel('Time, [s]')
    plt.ylabel(r'${\mathbf{\eta}}(t)$')
    plt.legend([r'$\phi$', r'$\Theta$', r'$\psi$'], loc=1, fontsize=6)

plt.show()
Code example #24
0
File: eval.py Project: yli96/DSEBM
def eval():
    logger = logging.getLogger(__name__)

    dataset = MNIST(is_train=False, batch_size=FLAGS.batch_size)

    ### Network definition
    images, labels = dataset.dummy_inputs()
    ebm = EBM()
    energies = ebm.energy(images)

    #### Session setting
    save_dict = ebm.load_saver_dict()
    saver = tf.train.Saver(save_dict)

    gpu_options = tf.GPUOptions(allow_growth=True)
    session_config = tf.ConfigProto(gpu_options=gpu_options,
                                    allow_soft_placement=True)

    list_energies = []
    list_images = []
    list_labels = []

    with tf.train.SingularMonitoredSession(
            config=session_config, checkpoint_dir=FLAGS.dir_parameter) as sess:
        num_iter = 0

        while not sess.should_stop():
            if dataset.completed:
                break

            cur_images, cur_labels = dataset.next_batch()

            cur_energies = sess.run(energies, feed_dict={images: cur_images})

            list_energies += cur_energies.tolist()
            list_images += cur_images.tolist()
            list_labels += cur_labels.tolist()

    sorted_energies, sorted_images, sorted_labels = zip(
        *sorted(zip(list_energies, list_images, list_labels), reverse=True))
    count_image = 0
    for cur_energy, cur_image in zip(sorted_energies, sorted_images):
        count_image += 1
        cur_path = os.path.join(FLAGS.dir_eval,
                                "{:04}.png".format(count_image))
        cur_image = dataset.depreprocess(cur_image)
        cv2.imwrite(cur_path, cur_image)

    precisions, recalls, thresholds = skmetrics.precision_recall_curve(
        y_true=sorted_labels, probas_pred=sorted_energies, pos_label=1)
    # MEMO: mean precision in the paper (maybe)
    average_precision = skmetrics.average_precision_score(
        y_true=sorted_labels, y_score=sorted_energies)

    plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
    plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('2-class Precision-Recall curve: AUC={0:0.2f}'.format(
        average_precision))
    plt.show()
Code example #25
0
    #         labcodes.append(continents[labels[i]])

    df_countryinfo_final.to_csv('1_' + imgobj + "_cluster.csv", index=False)

## pca analysis

pca = PCA(2)  # project from 64 to 2 dimensions
projected = pca.fit_transform(x_scaled)

#c=df_all_data.countrycode,
plt.scatter(projected[:, 0],
            projected[:, 1],
            c=df_summary.cluster,
            edgecolor='none',
            alpha=0.5,  # was 2.5; alpha must lie in [0, 1], and current matplotlib rejects values outside it
            cmap=plt.cm.get_cmap('Spectral', groups))  # lowercase 'spectral' was removed from matplotlib
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar()

## here we see first two components overlap heavily

pca = PCA(n_components=14)
X_train_pca = pca.fit_transform(x_scaled)
plt.bar(range(0, 14), pca.explained_variance_ratio_, alpha=0.5, align='center')
plt.step(range(0, 14), np.cumsum(pca.explained_variance_ratio_), where='mid')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.xticks([], [])
plt.show()
Code example #26
0
# Typical Arrival rate

hours = np.arange(0, 25, 1)
typ_a_r = [
    0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.1, 0.2, 0.3, 0.5, 0.6, 0.8, 0.6,
    0.45, 0.6, 0.62, 0.68, 0.5, 0.4, 0.4, 0.3, 0.2, 0.08, 0.08, 0.08
]
normalised_a_r = []

for i in typ_a_r:
    normalised_a_r.append(i * 15)

plt.figure()
plt.xlabel("hours")
plt.ylabel("Arrival Rate")
plt.step(hours, normalised_a_r)
plt.savefig("figs/typical.png")
#plt.show()

# Hour price per day (parameters from price function)
mu = 0.8
tau = 3
phi = 1.1
gam = 3

a = 0.159
b = 0.7
c = 2

C = 100
Code example #27
0
    #Lazy cold cut
    coldcut = 5

    medSub = medLight - medDark
    medSub[np.where(medSub <= coldcut)] = np.nan

    cpsPerArea = medSub / areaMKID
    QE = cpsPerArea / nPDraw[i]

    print(QE)

    if i == 0:
        plotArray(medSub)
        hist, bins = np.histogram(QE, bins=40, range=[0, 0.2])
        plt.step(bins[:-1], hist, label=wvls[i])
        plt.xlabel(r'QE fraction')
        plt.ylabel(r'N resonators')
        plt.legend()
        plt.show()

    medQE = np.nanmedian(QE.flatten())
    stdevQE = np.nanstd(QE.flatten())

    medQEArray.append(medQE)
    stdevQEArray.append(stdevQE)

#print wvls
#print medQEArray
#print stdevQEArray
Code example #28
0
    variance = 0.001
    y += np.random.normal(0, np.sqrt(variance), [len(y), 1])  # np.sqrt: no bare sqrt import appears in this excerpt

    xhat = kalmanFilter(x, omega, y)
    
    x = xnew
    omegavec[0:4,ii:ii+1] = omega
    states[0:12,ii:ii+1] = x
    measurements[0:7,ii:ii+1] = y
    estimates[0:12,ii:ii+1] = xhat


# Visualize simulation
if 1: # Omega
    plt.figure(1)
    plt.step(np.transpose(timevec), np.transpose(omegavec))
    plt.legend([r'$\omega_1$', r'$\omega_2$', r'$\omega_3$', r'$\omega_4$'])
    plt.xlabel('Time, [s]')
    plt.ylabel('Angular velocity, [rad/s]')

if 1: # States
    plt.figure(1)
    f, axarr = plt.subplots(2, 2)
    axarr[0,0].step(np.transpose(timevec),np.transpose(estimates[0:3,:]))
    axarr[0,0].step(np.transpose(timevec),np.transpose(states[0:3,:]))
    axarr[0,0].set_xlabel('Time, [s]')
    axarr[0,0].set_ylabel(r'$\mathbf{r}(t)$')
    axarr[0,0].legend(['x','y','z'],loc=3,fontsize=6)

    axarr[0,1].step(np.transpose(timevec),np.transpose(estimates[3:6,:]))
    axarr[0,1].step(np.transpose(timevec),np.transpose(states[3:6,:]))
Code example #29
0
    plt.plot(lcDict['time'],lcDict['intensity'], label=simLabel)
    plt.xlabel("Time (s)",fontsize=14)
    plt.ylabel("Counts",fontsize=14)
    plt.legend()
    plt.show()

    # take histogram of LC intensities
    hist, bins = histogramLC(lcDict['intensity'], norm=True, centers=True)
    guessIc = np.mean(lcDict['intensity'])*0.7
    guessIs = np.mean(lcDict['intensity'])*0.3

    # fit a MR to the histogram of the lightcurve intensities
    fitIc, fitIs = pdfs.fitMR(bins, hist, guessIc, guessIs)
    fitMR = pdfs.modifiedRician(I, fitIc, fitIs)

    # fit a poisson to the histogram to show it doesn't do as well
    guessLam = np.mean(lcDict['intensity'])
    fitLam = pdfs.fitPoisson(bins,hist,guessLam)
    fitPoisson = pdfs.poisson(I,fitLam)


    plt.plot(I,simMR,label=simLabel)
    plt.step(bins,hist,color='grey',label="Histogram of LC Intensities",where='mid')
    plt.plot(I,fitMR,label="MR fit to histogram: Ic=%2.2f, Is=%2.2f"%(fitIc, fitIs))
    plt.plot(I,fitPoisson,label="Poisson fit to histogram: Lambda=%2.2f"%(fitLam))
    plt.legend()
    plt.xlabel("Intensity",fontsize=14)
    plt.ylabel("Frequency",fontsize=14)
    plt.show()

Code example #30
0
                                     upper_cut_func(xPlot),
                                     lower_cut_func(xPlot),
                                     color=plotCol,
                                     alpha=0.2)
                    plt.legend(loc='upper right', framealpha=0, fontsize=10)
                    plt.xlim(0, 1.4)
                    plt.ylim(0.4, 1.0)
                    plt.tick_params(labelbottom=False, labelleft=False)  # 'off' strings are no longer accepted

                    # Plot top histogram
                    nBins_top = 100
                    rng_top = [-1, 1]
                    plt.sca(ax[1])
                    n_top, b_top = np.histogram(onset, nBins_top, rng_top)
                    x_top = np.diff(b_top) * 0.5 + b_top[:-1]
                    plt.step(x_top, n_top, where='mid', color=colors.black)
                    plt.xticks([-0.75, 0, 0.75])
                    plt.xlim(-1, 1)
                    plt.tick_params(labelbottom=False)
                    plt.ylabel(r'Counts / %.2f $\mu$s' %
                               (1.0 * np.diff(rng_top) / nBins_top))
                    plt.locator_params(axis='y', nbins=5)

                    # Plot side histogram
                    plt.sca(ax[2])
                    plt.step(n_side['Signal'],
                             np.arange(npe_max + 1) - 0.5,
                             color=colors.red,
                             label='Signal',
                             where='pre')
                    plt.step((n_side['Low_BG']) / 0.9125 * wd,
Code example #31
0
File: imd.py Project: holla2040/valvestudio
plt.ylabel('amplitude')
plt.title('output non-linear')

yf = fft(y, int(N))                             # output fft
Y = np.abs(yf[0:int(N/2.0+1.0)])                # single-sided
Ydb = mag2db(Y)                                 # convert to db
Ydb = Ydb-np.amax(Ydb)
n = np.arange(1.0,(N/2.0+1.0)+(1.0), 1.0)
plt.subplot(3, 1, 3)

indices = Ydb[0:1000] > -90
peakf = n[indices]
peaka = Ydb[indices]


plt.step(n[0:1000],Ydb[0:1000])
plt.xlabel('KHz')
yr = range(40,-121,-20)
#yr.insert(0,10)
plt.yticks(yr)
plt.xticks(range(0,1000,50))
plt.ylabel('magnitude')
plt.title('Two-tone output FFT with Intermodulation products')
plt.grid(True)

for i in range(len(peakf)):
    plt.annotate("%.0f\n%.1f"%(peakf[i],peaka[i]),
        xy=(peakf[i],peaka[i]),
        xycoords='data',
        xytext=(-15,5),
        textcoords='offset points',
Code example #32
0
plt.xlabel('Iteration')
plt.ylabel('Precision')

bar3 = ax1.bar([float(y) - .1 for y in x], presicion_list, width=0.1, color='r', align='center')
bar4 = ax1.bar(x, presicion_list_desicion, width=0.1, color='g', align='center')
ax1.legend((bar3[0], bar4[0]), ('KNN Classifier', 'Decision tree'), loc=2)


plt.show()
combined_list = [accuracy_list]


# print('----------------------------precision_recall_curve------------------------------------')
y_real = np.concatenate(y_real)
y_proba = np.concatenate(y_proba)
precision, recall, _ = precision_recall_curve(y_real, y_proba)
lab = 'Overall AUC=%.4f' % (auc(recall, precision))

print(precision)
plt.step(recall, precision, label=lab, lw=2, color='black')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend(loc='lower left', fontsize='small')

#f.tight_layout()
plt.show()




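Code example #33
0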
                fitData['S'][ejThreshold].append(minScaling)
                fitData['S'][ejThreshold].append(maxScaling)
                fitData['QF'][ejThreshold].append(minQF)
                fitData['QF'][ejThreshold].append(maxQF)

                # Plot best fit values
                plt.scatter([bestScaling/1000.0],[bestQF],marker='.',color='w')

                # Plot exp residual and scaled simulated spectrum
                ax1 = plt.subplot(122)
                _npe =  csiEdep* lightYield * bestQF
                nSim,b = np.histogram(_npe,41,[-0.5,40.5])
                nSim = poissonSmear(nSim,kernels)
                plt.errorbar(xPlot,nExp,yerr=nExpErr,linestyle='None',color=expColor,capsize=0,marker='o')
    #            plt.plot(xPlot,nSim*bestScaling/scalingConversion,c=simColor)
                plt.step(xPlot,nSim*bestScaling/scalingConversion,c=simColor,where='mid')
                plt.axhline(0,color=colors.black,linestyle='dashed')
                plt.ylabel('Counts / photoelectron')
                plt.xlabel('Number of photoelectrons')
                plt.xlim(0,30)
                plt.tight_layout(pad=0.25)
                plt.savefig(plotDir + '%d-%d-%s.png'%(angle,ejThreshold,qType),dpi=200)
                plt.savefig(plotDir + '%d-%d-%s.pdf'%(angle,ejThreshold,qType),dpi=200)
                plt.close('all')
    #            plt.show()

    if saveData:
        with open(dataOutputDir + '%s-%s.dat'%(angle,fileLabel[freeNeutronFluxScaling]),'w') as f:
            f.write('EJ-Threshold\tBest-QF\tBest-QF-Err1\tBest-QF-Err2\tBest-S\tBest-S-Err1\tBest-S-Err2\tNeg-QF\tNeg-QF-Err1\tNeg-QF-Err2\tNeg-S\tNeg-S-Err1\tNeg-S-Err2\tPos-QF\tPos-QF-Err1\tPos-QF-Err2\tPos-S\tPos-S-Err1\tPos-S-Err2\n')
            for ejThreshold in ejThresholdArr:
                f.write('%d\t%.5f\t%.5f\t%.5f\t%.1f\t%.1f\t%.1f\t%.5f\t%.5f\t%.5f\t%.1f\t%.1f\t%.1f\t%.5f\t%.5f\t%.5f\t%.1f\t%.1f\t%.1f\n'%(ejThreshold,
Code example #34
0
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)

# plot the variance explained ratio
tot = sum(eigen_vals)
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]

cum_var_exp = np.cumsum(var_exp)
import matplotlib.pylab as plt
plt.bar(range(1, 14),
        var_exp,
        alpha=0.5,
        align='center',
        label='Individual explained variance')

plt.step(range(1, 14),
         cum_var_exp,
         where='mid',
         label='Cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal component index')
plt.legend(loc='best')
plt.tight_layout()
plt.show()

# make a list of (eigenvalue, eigenvector) tuples

eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
               for i in range(len(eigen_vals))]
#sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs.sort(key=lambda k: k[0], reverse=True)

# select the top two eigenpairs, which capture roughly 60 percent of the variance (a sketched continuation follows)
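# The excerpt ends here; the usual next step in this recipe (an assumption, not
# shown in the source) stacks the top two eigenvectors into a projection matrix:
# w = np.hstack((eigen_pairs[0][1][:, np.newaxis],
#                eigen_pairs[1][1][:, np.newaxis]))
# X_pca = X_std.dot(w)  # X_std: assumed standardized feature matrix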
Code example #35
0
File: TradeTest.py Project: patruong/MEX_Thesis
        Capital = (Capital + df_change.iloc[i]["Adj Close"] * n_stock)
        n_stock = 0

        print("SOLD stock at " + str(df_change.iloc[i].name.date()))
        print("Capital: " + str(Capital))
        print()

        value.append(Capital)
        dates.append(df_change.iloc[i].name.date())

print("Stocks: " + str(n_stock))
print("Stock Value: " + str(n_stock * df_change.iloc[i]["Adj Close"]))
print("Capital: " + str(Capital))
print("Total Value: " +
      str(Capital + n_stock * df_change.iloc[i]["Adj Close"]))
plt.step(dates, value)
""" MA20 MA50 """

"BACK TESTING"
t1 = (ma20[:-1] > ma50[:-1]).values
t2 = (ma20[1:] > ma50[1:]).values
plt.plot(t1 == t2)

df_ma20ma50 = pd.DataFrame(data=t1,
                           index=omx.index[:-1],
                           columns=['ma20>ma50'])
df_SQ = pd.DataFrame(data=(t1 == t2),
                     index=omx.index[:-1],
                     columns=['StatusQuo'])
df_test = pd.concat([omx_adj[:-1], df_ma20ma50, df_SQ],
                    axis=1)[49:]  #49 because this is where ma50 begins
Code example #36
0
class TestFreezingMethods:
    # TODO #599
    # @staticmethod
    # def test_freeze_singular(backend):
    #     pass

    @staticmethod
    def test_freeze_time_dependent(plot=False):
        # Arrange
        cases = (
            {'dt': 5e5, 'N':  1},
            {'dt': 1e6, 'N':  1},
            {'dt': 5e5, 'N':  8},
            {'dt': 1e6, 'N':  8},
            {'dt': 5e5, 'N': 32},
            {'dt': 1e6, 'N': 32},
        )
        rate = 1e-9
        immersed_surface_area = 1
        constant.J_het = rate / immersed_surface_area

        number_of_real_droplets = 1024
        total_time = 2e9  # effectively interpreted here as seconds, i.e. cycle = 1 * si.s

        # dummy (but must-be-set) values
        vol = 44  # just to enable sign flipping (ice water uses negative volumes), actual value does not matter
        dv = 666  # products use concentration, just dividing there and multiplying back here, actual value does not matter

        hgh = lambda t: np.exp(-0.8 * rate * (t - total_time / 10))
        low = lambda t: np.exp(-1.2 * rate * (t + total_time / 10))

        # Act
        output = {}

        for case in cases:
            n_sd = int(number_of_real_droplets // case['N'])
            assert n_sd == number_of_real_droplets / case['N']
            assert total_time // case['dt'] == total_time / case['dt']

            key = f"{case['dt']}:{case['N']}"
            output[key] = {'unfrozen_fraction': [], 'dt': case['dt'], 'N': case['N']}

            formulae = Formulae(heterogeneous_ice_nucleation_rate='Constant')
            builder = Builder(n_sd=n_sd, backend=CPU(formulae=formulae))
            env = Box(dt=case['dt'], dv=dv)
            builder.set_environment(env)
            builder.add_dynamic(Freezing(singular=False))
            attributes = {
                'n': np.full(n_sd, int(case['N'])),
                'immersed surface area': np.full(n_sd, immersed_surface_area),
                'volume': np.full(n_sd, vol)
            }
            products = (IceWaterContent(specific=False),)
            particulator = builder.build(attributes=attributes, products=products)

            env['a_w_ice'] = np.nan

            cell_id = 0
            for i in range(int(total_time / case['dt']) + 1):
                particulator.run(0 if i == 0 else 1)

                ice_mass_per_volume = particulator.products['qi'].get()[cell_id]
                ice_mass = ice_mass_per_volume * dv
                ice_number = ice_mass / (const.rho_w * vol)
                unfrozen_fraction = 1 - ice_number / number_of_real_droplets
                output[key]['unfrozen_fraction'].append(unfrozen_fraction)

        # Plot
        if plot:
            fit_x = np.linspace(0, total_time, num=100)
            fit_y = np.exp(-rate * fit_x)

            for key in output.keys():
                pylab.step(
                    output[key]['dt'] * np.arange(len(output[key]['unfrozen_fraction'])),
                    output[key]['unfrozen_fraction'],
                    label=f"dt={output[key]['dt']:.2g} / N={output[key]['N']}",
                    marker='.',
                    linewidth=1 + output[key]['N']//8
                )

            pylab.plot(fit_x, fit_y, color='black', linestyle='--', label='theory', linewidth=5)
            pylab.plot(fit_x, hgh(fit_x), color='black', linestyle=':', label='assert upper bound')
            pylab.plot(fit_x, low(fit_x), color='black', linestyle=':', label='assert lower bound')
            pylab.legend()
            pylab.yscale('log')
            pylab.ylim(fit_y[-1], fit_y[0])
            pylab.xlim(0, total_time)
            pylab.xlabel("time")
            pylab.ylabel("unfrozen fraction")
            pylab.grid()
            pylab.show()

        # Assert
        for key in output.keys():
            data = np.asarray(output[key]['unfrozen_fraction'])
            x = output[key]['dt'] * np.arange(len(data))
            np.testing.assert_array_less(data, hgh(x))
            np.testing.assert_array_less(low(x), data)
Code example #37
0
File: Utilities.py Project: CPCLAB-UNIPI/MPC-code
def makeplot(tsim, X1, label, pf, *var, **kwargs):
    """
    SUMMARY:
    It constructs the plot where tsim is on the x-axis, 
    X1,X2,X3 on the y-axis, and label is the label of the y-axis 
    
    SYNTAX:
    makeplot(tsim,X1,label,*var):
  
    ARGUMENTS:
    + tsim          - x-axis vector (time of the simulation (min))
    + X1,X2,X3      - y-axis vectors.
    X1 represent the actual value
    X2 the target (eventual)
    X3 the setpoint (eventual)
    + label         - label for the y-axis
    + pf            - path where the plots are saved
    + var           - optional positional vectors (X2 and/or X3) to plot together with X1
    + kwargs        - plot options, including the linestyle and an override for the default legend label
    """

    linetype = '-'  # default value for linetype
    lableg = 'Target'  # default value for legend label
    for kwkey in kwargs:
        if kwkey == 'pltopt':
            linetype = kwargs['pltopt']
        if kwkey == 'lableg':
            lableg = kwargs['lableg']

    nt = int(tsim.size)

    X1 = np.array(X1)

    sz = old_div(X1.size, nt)
    Xout1 = np.zeros((nt, sz))
    Xout2 = np.zeros((nt, sz))
    Xout3 = np.zeros((nt, sz))

    for k in range(sz):
        x1 = X1[k::sz]

        plt.figure()
        try:
            plt.plot(tsim, x1, ls=linetype)
        except ValueError:
            plt.step(tsim, x1)
        plt.xlabel('Time ')
        plt.ylabel(label + str(k + 1))
        plt.gca().set_xlim(left=0, right=tsim[-1])

        Xout1[:, k] = np.reshape(x1, (nt, ))

        for i_var in range(len(var)):
            # extract dimension of var
            var_i = var[i_var]
            Xi = np.array(var_i)
            xi = Xi[k::sz]
            try:
                plt.plot(tsim, xi, ls=linetype)
            except ValueError:
                plt.step(tsim, xi)
            if i_var == 0:
                plt.legend(('Actual', lableg))
                plt.gca().set_xlim(left=0, right=tsim[-1])
                Xout2[:, k] = np.reshape(xi, (nt, ))
            elif i_var == 1:
                plt.legend(('Actual', 'Target', 'Set-Point'))
                plt.gca().set_xlim(left=0, right=tsim[-1])
                Xout3[:, k] = np.reshape(xi, (nt, ))

        plt.grid(True)

        plt.savefig(pf + label + str(k + 1) + '.pdf',
                    format='pdf',
                    transparent=True,
                    bbox_inches='tight')

    return [Xout1, Xout2, Xout3]
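
# Hypothetical usage sketch, not part of the original Utilities.py: every value
# below is invented for illustration, assuming the module is executed directly.
if __name__ == '__main__':
    tsim = np.linspace(0.0, 10.0, 51)  # simulation time grid (min)
    x_actual = np.sin(tsim)            # actual value (X1)
    x_target = np.ones(tsim.size)      # target to overlay (X2)
    makeplot(tsim, x_actual, 'State x', './', x_target, lableg='Target')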
Code example #38
0
File: index.py Project: Dnnd/tips_homework_2
plt.title('PDF Histogram')
plt.xticks(bin_edges)
hist_norm, _ = np.histogram(data1, 'sturges', density=True)
plt.hist(data1, bins='sturges', color='gray', edgecolor='black', density=True)
plt.plot(midpoints, hist_norm, color='black')
tpdf = theor_pdf(x_tdf, rel_scale_coeff)

plt.plot(x_tdf, tpdf, ls='dashed', color='r')
plt.title('PDF histogram')
plt.grid(True, which='both', axis='y')  # the 'b' keyword was removed from matplotlib
#plt.savefig('pdf_hist.png', dpi=900)

probs = [x / np.sum(hist) for x in hist]
cdf = np.cumsum(probs, dtype=float)
plt.figure(20)
plt.step(midpoints, cdf, where='post', color='black')
plt.grid(True, which='both', axis='both')
plt.xticks(midpoints)
plt.title('ECDF')

tcdf = theor_cdf(x_tdf, rel_scale_coeff)
plt.plot(x_tdf, tcdf, ls='dashed', color='r')
#plt.savefig('ecdf.png', dpi = 900)
ct = np.array([
    x * (np.sum(hist) * bins_width)
    for x in theor_pdf(midpoints, rel_scale_coeff)
])

chi2_emp = np.sum(((hist - ct)**2) / ct)

chi2_crit = st.chi2.ppf(0.99, np.size(bin_edges) - 2)
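
# A natural follow-up (an assumption, not shown in the excerpt): compare the
# empirical statistic against the critical value to finish the chi-square test.
# print('reject H0' if chi2_emp > chi2_crit else 'fail to reject H0')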
Code example #39
0
g1 = clf.feature_importances_

## predicted feature scores
y_score = clf.predict_proba(X_train)[:,1]
y_test_score=clf.predict_proba(X_test)[:,1]
y_verify_score=clf.predict_proba(X_verify)[:,1]


##pr curve
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score

precision, recall, thresholds = precision_recall_curve(y_test, y_test_score)
average_precision = average_precision_score(y_test, y_test_score)

plt.step(recall, precision, color='b', alpha=0.2, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')

plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(average_precision))


## compute the AUC and plot it
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
roc_auc_score(y_train, y_score)
roc_auc_score(y_test, y_test_score)
roc_auc_score(y_verify, y_verify_score)
Code example #40
0
class TestFreezingMethods:
    # TODO #599
    @staticmethod
    # pylint: disable=redefined-outer-name
    def test_freeze_singular(backend_class):
        pass

    @staticmethod
    def test_freeze_time_dependent(plot=False):
        # Arrange
        cases = (
            {'dt': 5e5, 'N':  1},
            {'dt': 1e6, 'N':  1},
            {'dt': 5e5, 'N':  8},
            {'dt': 1e6, 'N':  8},
            {'dt': 5e5, 'N': 16},
            {'dt': 1e6, 'N': 16},
        )
        rate = 1e-9
        immersed_surface_area = 1

        number_of_real_droplets = 1024
        total_time = 2e9  # effectively interpreted here as seconds, i.e. cycle = 1 * si.s

        # dummy (but must-be-set) values
        vol = 44  # for sign flip (ice water has negative volumes), value does not matter
        d_v = 666  # products use conc., dividing there, multiplying here, value does not matter

        hgh = lambda t: np.exp(-0.8 * rate * (t - total_time / 10))
        low = lambda t: np.exp(-1.2 * rate * (t + total_time / 10))

        # Act
        output = {}

        for case in cases:
            n_sd = int(number_of_real_droplets // case['N'])
            assert n_sd == number_of_real_droplets / case['N']
            assert total_time // case['dt'] == total_time / case['dt']

            key = f"{case['dt']}:{case['N']}"
            output[key] = {'unfrozen_fraction': [], 'dt': case['dt'], 'N': case['N']}

            formulae = Formulae(
                heterogeneous_ice_nucleation_rate='Constant',
                constants={
                    'J_HET': rate / immersed_surface_area
                }
            )
            builder = Builder(n_sd=n_sd, backend=CPU(formulae=formulae))
            env = Box(dt=case['dt'], dv=d_v)
            builder.set_environment(env)
            builder.add_dynamic(Freezing(singular=False))
            attributes = {
                'n': np.full(n_sd, int(case['N'])),
                'immersed surface area': np.full(n_sd, immersed_surface_area),
                'volume': np.full(n_sd, vol)
            }
            products = (IceWaterContent(name='qi'),)
            particulator = builder.build(attributes=attributes, products=products)

            env['a_w_ice'] = np.nan

            cell_id = 0
            for i in range(int(total_time / case['dt']) + 1):
                particulator.run(0 if i == 0 else 1)

                ice_mass_per_volume = particulator.products['qi'].get()[cell_id]
                ice_mass = ice_mass_per_volume * d_v
                ice_number = ice_mass / (const.rho_w * vol)
                unfrozen_fraction = 1 - ice_number / number_of_real_droplets
                output[key]['unfrozen_fraction'].append(unfrozen_fraction)

        # Plot
        fit_x = np.linspace(0, total_time, num=100)
        fit_y = np.exp(-rate * fit_x)

        for out in output.values():
            pylab.step(
                out['dt'] * np.arange(len(out['unfrozen_fraction'])),
                out['unfrozen_fraction'],
                label=f"dt={out['dt']:.2g} / N={out['N']}",
                marker='.',
                linewidth=1 + out['N']//8
            )

        _plot_fit(fit_x, fit_y, low, hgh, total_time)
        if plot:
            pylab.show()

        # Assert
        for out in output.values():
            data = np.asarray(out['unfrozen_fraction'])
            arg = out['dt'] * np.arange(len(data))
            np.testing.assert_array_less(data, hgh(arg))
            np.testing.assert_array_less(low(arg), data)
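
# _plot_fit is not shown in this excerpt; judging from the inline plotting block
# in the older revision of this test (example #36 above), a plausible sketch is:
def _plot_fit(fit_x, fit_y, low, hgh, total_time):
    pylab.plot(fit_x, fit_y, color='black', linestyle='--', label='theory', linewidth=5)
    pylab.plot(fit_x, hgh(fit_x), color='black', linestyle=':', label='assert upper bound')
    pylab.plot(fit_x, low(fit_x), color='black', linestyle=':', label='assert lower bound')
    pylab.legend()
    pylab.yscale('log')
    pylab.ylim(fit_y[-1], fit_y[0])
    pylab.xlim(0, total_time)
    pylab.xlabel("time")
    pylab.ylabel("unfrozen fraction")
    pylab.grid()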
Code example #41
0
                volume=dv,
                spectrum=case['ISA'],
                droplet_volume=droplet_volume,
                multiplicity=multiplicity,
                total_time=total_time,
                number_of_real_droplets=number_of_real_droplets)
            output[key].append(data)

    # Plot
    for key, output_item in output.items():
        for run in range(n_runs_per_case):
            label = f"{key}: σ=ln({int(cases[key]['ISA'].s_geom)}),"\
                    f"N={int(cases[key]['ISA'].norm_factor * dv)}"
            pylab.step(dt / si.min * np.arange(len(output_item[run])),
                       output_item[run],
                       label=label if run == 0 else None,
                       color=cases[key]['color'],
                       linewidth=.666)
        output_item.append(np.mean(np.asarray(output_item), axis=0))
        pylab.step(dt / si.min * np.arange(len(output_item[-1])),
                   output_item[-1],
                   color=cases[key]['color'],
                   linewidth=1.666)

    pylab.legend()
    pylab.yscale('log')
    pylab.ylim(1e-2, 1)
    pylab.xlim(0, total_time / si.min)
    pylab.xlabel("t / min")
    pylab.ylabel("$f_{ufz}$")
    pylab.gca().set_box_aspect(1)
Code example #42
0
y = 2.0 * x + 1e-3 * (x**2) + 1e-3 * (x**3)

plt.subplot(3, 1, 2)
plt.plot(t, y)
plt.xlabel('time')
plt.ylabel('amplitude')
plt.title('output non-linear')

yf = fft(y, int(N))  # output fft
Y = np.abs(yf[0:int(N / 2.0 + 1.0)])  # single-sided
Ydb = mag2db(Y)  # convert to db
Ydb = Ydb - np.amax(Ydb)
n = np.arange(1.0, (N / 2.0 + 1.0) + (1.0), 1.0)
plt.subplot(3, 1, 3)

plt.step(n[0:1000], Ydb[0:1000])
plt.xlabel('KHz')
plt.xticks(range(0, 1000, 50))
plt.ylabel('magnitude')
plt.title('Two-tone output FFT with Intermodulation products')

M1 = M1 + 1.0  # moved 1 bin up in fft
M2 = M2 + 1.0

m1dB = Ydb[int(M1) - 1]  # tone1 mag
m2dB = Ydb[int(M2) - 1]  # tone2 mag
print('f1 = %10.1f Hz, f1dB = %f dB' % (f1, m1dB))
print('f2 = %10.1f Hz, f2dB = %f dB\n' % (f2, m2dB))
IM1 = Ydb[int((2. * M1 - M2)) - 1]
IM2 = Ydb[int((2. * M2 - M1)) - 1]
IM3 = (IM1 + IM2) / 2.
Code example #43
0
    
    x = xnew
    omegavec[0:4,ii:ii+1] = omega
    states[0:12,ii:ii+1] = x
    measurements[0:7,ii:ii+1] = y
    estimatesKalman[0:12,ii:ii+1] = xhatKalman
    estimatesExtended[0:12,ii:ii+1] = xhatExtended


# Visualize simulation

if 1: # States
    plt.figure(1)
    plt.plot(np.transpose(timevec),np.transpose(estimatesKalman[2,:]))
    plt.plot(np.transpose(timevec),np.transpose(estimatesExtended[2,:]))
    plt.step(np.transpose(timevec),np.transpose(states[2,:]))
    plt.xlabel('Time, [s]')
    plt.ylabel(r'$\mathbf{r}(t)$')
    plt.legend(['x','y','z'],loc=1,fontsize=6)

    plt.figure(2)
    plt.plot(np.transpose(timevec),np.transpose(estimatesKalman[6,:]))
    plt.plot(np.transpose(timevec),np.transpose(estimatesExtended[6,:]))
    plt.step(np.transpose(timevec),np.transpose(states[6,:]))
    plt.xlabel('Time, [s]')
    plt.ylabel(r'${\mathbf{\eta}}(t)$')
    plt.legend([r'$\phi$',r'$\Theta$',r'$\psi$'],loc=1,fontsize=6)

plt.show()
Code example #44
0
File: flagreader.py Project: mkolopanis/capo
##Write data to npz

if opts.npz is not None: npzname=opts.npz+'.npz'
else: npzname='%s_%s_%s_RFI.npz'%(jd,opts.ant,opts.pol)

if opts.verb: print('Writing data to %s' % npzname)
np.savez(npzname,grid=flg_arr,dJDs=t_arr,percent_f=pcnt_f,percent_t=pcnt_t)

#If you don't want plots, let's save everyone a smidgen of time and quit now
if not opts.show and not opts.save_wfall and not opts.save_freq: sys.exit()

##Plotting freq occupancy

if opts.save_freq or opts.show:
    if opts.verb: print('Plotting frequency-occupancy plot')
    pylab.step(fqs,pcnt_f,where='mid')
    pylab.fill_between(fqs,0,pcnt_f,color='blue',alpha=0.3)
    pylab.xlabel('Frequency [MHz]')
    pylab.ylabel('Occupancy [%]')
    if len(args)==1: pylab.suptitle(file2jd(args[0]),size=15)
    else: pylab.suptitle('%s - %s'%(file2jd(args[0]),file2jd(args[len(args)-1])),size=15)
    
    #pylab.savefig('%s_%s_%s_F.png'%(jd,opts.ant,opts.pol)) #hard to plug into RTP
    if opts.fimg is not None: pylab.savefig(opts.fimg)
    else: pylab.savefig('%s_f.png'%args[0]) #zen.2451234.12345.xx.HH.uvcR_f.png
    #^ it's being run on each uvcR file in the RTP system
    if opts.show:
        pylab.show()
    else:
        pylab.close()
Code example #45
0
def graph_cdf_changed_overlap_cut_bj_v2(infos):
    
    intersectionpp_files = [x for x in listdir(infos["output_dir"])
        if x.find("intersections_with") != -1]

    intersect_num = 0
    changed_overlap = list()
   
    print "graph_cdf_changed_overlap_cut_bj_v2"
    for f in intersectionpp_files:
        
        print f
        infos["node"] = f.split(".",1)[1]
        out_ids_intersect = open("ids/ids_cdf_changed_overlap_cut_bj_v2." + infos["node"], "w")
        
        for line in open(infos["output_dir"]+"/"+f,"r"):

            line_parts = line.split(" ")

            overlap_oldpath = path.path_fromstr(line_parts[7])
            overlap_newpath = path.path_fromstr(line_parts[11])

            lczs_detection_old = [path.hops_fromstr(x.split(";")[0]) \
                    for x in line_parts[3].split("#")]
           
            intersections = [x.split(";")[2] \
                    for x in line_parts[3].split("#") ]

            overlap_changes_old_str = line_parts[13]
            overlap_changes_new_str = line_parts[15]

            overlap_changes_old = []
            overlap_changes_new = []
            if overlap_changes_old_str != "empty":
                overlap_changes_old = [x.strip(",").split(",") for x in \
                    overlap_changes_old_str.split("#") if x]
                overlap_changes_new = [x.strip(",").split(",") for x in \
                    overlap_changes_new_str.split("#") if x]


            statistical.fix_branch_join(overlap_oldpath,overlap_newpath,\
                    overlap_changes_old, overlap_changes_new)
           
            for j,intersection in enumerate(intersections):
                lcz_detection_old = lczs_detection_old[j]
                
                branch_detection = lcz_detection_old[0]
                join_detection = lcz_detection_old[-1]
                intersection_hops = path.hops_fromstr(intersection)

                intersection_hops = [x for x in intersection_hops \
                    if x != ["255.255.255.255"] and x != branch_detection \
                        and x != join_detection]
               
                if(not intersection_hops):
                    continue

                total_coverage = set()
                for i,change_zone_old in enumerate(overlap_changes_old):

                    change_zone_old_hops = path.path_get_subpath(overlap_oldpath,\
                        change_zone_old[1:-1])
                    coverage_change_zone = statistical.list_intersection(\
                        change_zone_old_hops, intersection_hops, ignore_lb=False)
                    
                    if(coverage_change_zone):
                        set_2 = set([",".join(x) for x in coverage_change_zone]) 
                        total_coverage |= set_2
                
                set_intersect = set([",".join(x) for x in intersection_hops]) 
                v = float(len(total_coverage)) / len(set_intersect)
                changed_overlap.append(v)
                intersect_num += 1
                
                if(v>0):
                    print >> out_ids_intersect, ",".join(line_parts[0:3]),\
                        len(total_coverage), len(set_intersect)
        out_ids_intersect.close()

    uniq_values = list(set(changed_overlap))
    x = uniq_values
    x.sort()
    y = [changed_overlap.count(i)/float(intersect_num) for i in x]
    cdf = np.cumsum(y)

    o1 = open("dots/out_cdf_changed_overlap_cut_bj_v2.txt","w")
    for i in range(len(x)):
        print >> o1, x[i],y[i]
    o1.close()

    o2 = open("dots/dots_cdf_changed_overlap_cut_bj_v2.txt","w")
    for i in changed_overlap:
        print >> o2, i
    o2.close()
    
    plt.step(x,cdf, where="post")

    plt.xlabel("% of the intersect that has a LCZD",fontsize=16)
    plt.ylabel("CDF of all intersects on detections",fontsize=16)

    plt.ylim(0.0, 1.0)
    plt.xlim(0.0, 1.0)
    plt.savefig('out_cdf_changed_overlap_cut_bj_v2.pdf')
    plt.clf()
Code example #46
0
	# Create a subselection of all bursts and bursts with TS > 25
	good_All = numpy.where(tsmap_P8_P301_Error90 > 0)
	good_HighTS = numpy.where(tsmap_P8_P301_MaxTS > 25)

	# Obtain the number of bursts that survived the cuts
	numberOfBurstsP8_90_All = len(angularSeperation_P7toP8[good_All]/tsmap_P8_P301_Error90[good_All])
	numberOfBurstsP8_95_All = len(angularSeperation_P7toP8[good_All]/tsmap_P8_P301_Error95[good_All])
	numberOfBurstsP8_90_HighTS = len(angularSeperation_P7toP8[good_HighTS]/tsmap_P8_P301_Error90[good_HighTS])
	numberOfBurstsP8_95_HighTS = len(angularSeperation_P7toP8[good_HighTS]/tsmap_P8_P301_Error95[good_HighTS])

	# Generate a cumulative sum array
	cumulativeSumP8_90_All = numpy.arange(numberOfBurstsP8_90_All)/(float(numberOfBurstsP8_90_All)-1)
	cumulativeSumP8_95_All = numpy.arange(numberOfBurstsP8_95_All)/(float(numberOfBurstsP8_95_All)-1)

	# Plot the angular separation between P7 and P8 normalized by the P8_P301 90% Error
	plt.step(numpy.sort(angularSeperation_P7toP8[good_All]/tsmap_P8_P301_Error90[good_All]), cumulativeSumP8_90_All)
	plt.step(numpy.sort(angularSeperation_P7toP8[good_All]/tsmap_P8_P301_Error95[good_All]), cumulativeSumP8_95_All)
	i = numpy.where(numpy.sort(angularSeperation_P7toP8[good_All]/tsmap_P8_P301_Error90[good_All]) == angularSeperation_P7toP8[GRB130427A]/tsmap_P8_P301_Error90[GRB130427A])
	plt.scatter(angularSeperation_P7toP8[GRB130427A]/tsmap_P8_P301_Error90[GRB130427A], cumulativeSumP8_90_All[i])
	plt.annotate('GRB130427A', xy=(angularSeperation_P7toP8[GRB130427A]/tsmap_P8_P301_Error90[GRB130427A], cumulativeSumP8_90_All[i] ), xytext=(-20,-20), textcoords='offset points', ha='center', va='bottom')
	plt.legend(('P8_301 90% C.L.', 'P8_301 95% C.L.'), frameon=False, scatterpoints=1, loc=4)
	plt.plot([1,1],[0,1.05], '--')
	plt.ylim(0,1.05)
	plt.xlim(0,5)
	plt.xlabel(r'$\theta_{\rm P7 to P8} / \sigma$')	
	plt.show()

	# Select a subset of bursts with known x-ray, optical, or radio localizations
	#good = numpy.where((LIKE == 1) & (BestSource != 'Fermi-LAT') & (BestSource != 'Fermi-GBM') & (BestSource != 'IPN'))[0]
	good = numpy.where((tsmap_P8_P301_Error95 > 0) & (BestSource != 'Fermi-LAT') & (BestSource != 'Fermi-GBM') & (BestSource != 'IPN'))[0]
	GRB130427A = numpy.where(GRBs[good] == '130427324')
Code example #47
0
def graph_generate_basic(infos):
    
    evaluation_2_files = [x for x in listdir(infos["output_dir"])
        if x.find("evaluation_2") != -1]

    number_of_detections = 0
    number_of_intersects = 0
    number_of_lcz_in_intersects = 0 
    size_of_lcz = defaultdict(lambda: 0)
    
    for f in evaluation_2_files:
        uniq_detections = set()
        for line in open(infos["output_dir"]+"/"+f,"r"):
            
            number_of_intersects += 1
            
            line_parts = line.strip().split(" ")
            uniq_detections.add(line_parts[0])
            if(line_parts[3] == "0,0"):
                continue
            for lcz_info in line_parts[3].split(";"):
                lcz_size,lcz_coverage = lcz_info.split(",")
                lcz_size = int(lcz_size)
                size_of_lcz[lcz_size] += 1
                number_of_lcz_in_intersects += 1
        
        number_of_detections += len(uniq_detections)
    
    print "number_of_intersects: ",number_of_intersects
    print "number_of_detections: ",number_of_detections
    print "number_of_lcz_in_intersects: ", number_of_lcz_in_intersects

    x = [z for z in size_of_lcz.keys()]
    x.sort()
    y = [size_of_lcz[i]/float(number_of_lcz_in_intersects)\
        for i in x]
    cdf = np.cumsum(y)

    plt.step(x,cdf,label="CDF of LCZ size", where="post")

    plt.xlabel("LCZ size",fontsize=16)
    plt.ylabel("CDF of all LCZ in overlapping changed routes",fontsize=16)

    plt.ylim(0.0, 1.0)
    plt.legend(loc='lower right')
    plt.savefig('basic_1.pdf')
    plt.clf()

    uniq_size_of_lcz = defaultdict(lambda: 0)
    uniq_number_of_lcz_in_intersects = 0
    uniq_number_of_intersects = 0
    for f in evaluation_2_files:
        overlap_change_ids = set()
        for line in open(infos["output_dir"]+"/"+f,"r"):
            

            line_parts = line.strip().split(" ")
            overlap_change_id = line_parts[1] + " " + line_parts[2]
            if overlap_change_id in overlap_change_ids:
                continue
            overlap_change_ids.add(overlap_change_id)
            
            uniq_number_of_intersects += 1
            if(line_parts[3] == "0,0"):
                continue
            
            for lcz_info in line_parts[3].split(";"):
                if(not lcz_info):
                    continue
                lcz_size,lcz_coverage = lcz_info.split(",")
                lcz_size = int(lcz_size)
                uniq_size_of_lcz[lcz_size] += 1
                uniq_number_of_lcz_in_intersects += 1
    
    print "uniq_number_of_lcz_in_intersects", uniq_number_of_lcz_in_intersects
    print "uniq_number_of_intersects", uniq_number_of_intersects
    x = [z for z in uniq_size_of_lcz.keys()]
    x.sort()
    y = [uniq_size_of_lcz[i]/float(uniq_number_of_lcz_in_intersects)\
        for i in x]
    cdf = np.cumsum(y)

    plt.step(x,cdf,label="CDF of LCZ size", where="post")

    plt.xlabel("LCZ size",fontsize=16)
    plt.ylabel("CDF of all uniq LCZ in overlapping changed routes",fontsize=16)

    plt.ylim(0.0, 1.0)
    plt.legend(loc='lower right')
    plt.savefig('basic_2.pdf')
    plt.clf()