Example #1
def plot_phi():
    n = 10
    nf = 1000

    # for the red points
    x = np.linspace(0, 1, n)
    # for the curve
    xf = np.linspace(0, 1, nf)

    phi = lambda x: x + 0.1 * np.sin(3 * np.pi * x)

    y = phi(x)

    # default style 'b-' solid blue line
    plt.plot(xf, phi(xf), lw=2)  # lw line width
    # 'ro' plot points as red circles
    plt.plot(x, y, "ro", ms=8)  # ms marker size

    # The 'bases' (blobs) of the vlines and hlines
    plt.plot(x, [0] * n, "ko", ms=8)
    plt.plot([0] * n, y, "ko", ms=8)
    # Dotted vlines and hlines
    plt.vlines(x, [0], y, linestyle="--")
    plt.hlines(y, [0], x, linestyle="--")
    plt.title(r"$y=\phi(x)$", fontsize=24)
    plt.xlabel(r"$x$", fontsize=24)
    plt.ylabel(r"$y$", fontsize=24)
    plt.show()
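A minimal preamble to run this example, assuming the usual NumPy/matplotlib aliases (not shown in the original snippet):

import numpy as np
import matplotlib.pyplot as plt

plot_phi()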
def plot_percentiles(data, numbins, xlim, ylim, vert = True, color = 'k', linestyle = 'solid', linew = 2):
    perc = 1. / numbins 
    for i in range(1, numbins):
        if vert:
            plt.vlines(sts.scoreatpercentile(data, i * perc * 100.), ylim[0], ylim[1], color, linestyle, linewidth = linew)
        else:
            plt.hlines(sts.scoreatpercentile(data, i * perc * 100.), xlim[0], xlim[1], color, linestyle, linewidth = linew)
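`sts` is never imported in this snippet; a usage sketch assuming it aliases scipy.stats:

import numpy as np
import scipy.stats as sts
import matplotlib.pyplot as plt

data = np.random.normal(size=1000)
plt.hist(data, bins=30, alpha=0.4)
plot_percentiles(data, numbins=4, xlim=(-4, 4), ylim=(0, 120))  # draws vlines at the three quartiles
plt.show()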
Example #3
def main(args):

    histogram = args.histogram
    min_limit = args.min_limit
    max_limit = args.max_limit
    kmer = args.kmer
    output_name = args.output_name

    Kmer_histogram = pd.io.parsers.read_csv(histogram, sep="\t", header=None)
    Kmer_coverage = Kmer_histogram[Kmer_histogram.columns[0]].tolist()
    Kmer_count = Kmer_histogram[Kmer_histogram.columns[1]].tolist()
    Kmer_freq = [Kmer_coverage[i] * Kmer_count[i] for i in range(len(Kmer_coverage))]
    # coverage peak, disregarding initial peak
    kmer_freq_peak = Kmer_freq.index(max(Kmer_freq[min_limit:max_limit]))
    kmer_freq_peak_value = max(Kmer_freq[min_limit:max_limit])

    xmax = max_limit
    ymax = kmer_freq_peak_value + (kmer_freq_peak_value * 0.30)

    plt.plot(Kmer_coverage, Kmer_freq)
    plt.title("K-mer length = {}".format(kmer))
    plt.xlim((0, xmax))
    plt.ylim((0, ymax))
    plt.vlines(kmer_freq_peak, 0, kmer_freq_peak_value, colors="r", linestyles="--")
    plt.text(kmer_freq_peak, kmer_freq_peak_value + 2000, str(kmer_freq_peak))
    plotname = "{}".format(output_name)
    plt.savefig(plotname)
    plt.clf()
    return 0
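main() reads five attributes off args, so it pairs naturally with argparse; a hypothetical wrapper (argument names taken from the attribute accesses above, defaults purely illustrative, and pandas/matplotlib assumed already imported as pd/plt for the function body):

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plot a k-mer frequency histogram")
    parser.add_argument("histogram", help="two-column TSV: coverage<TAB>count")
    parser.add_argument("--min-limit", dest="min_limit", type=int, default=5)    # assumed default
    parser.add_argument("--max-limit", dest="max_limit", type=int, default=200)  # assumed default
    parser.add_argument("--kmer", type=int, default=31)                          # assumed default
    parser.add_argument("--output-name", dest="output_name", default="kmer.png")
    main(parser.parse_args())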
def plot(learned_q, imitated_q):
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    plt.ion()
    plt.figure(figsize=(16,9)) # Change this if figure is too big (i.e. laptop)
    gs = gridspec.GridSpec(1, 2, width_ratios=[1,2])
    
    while True:
        plt.subplot(gs[0])
        plt.cla()
        plt.title('Learned.')
        plt.legend(plt.plot(learned_q.get()), ['level1', 'envelope1', 'pitch1', 'centroid1'])
        plt.draw()

        plt.subplot(gs[1])
        plt.cla()
        plt.title('Listening + imitation.')
        input_data, sound = imitated_q.get()
        plt.plot(np.vstack( [input_data, sound] ))
        ylim = plt.gca().get_ylim()
        plt.vlines(input_data.shape[0], ylim[0], ylim[1])
        plt.gca().annotate('imitation starts', xy=(input_data.shape[0],0), 
                           xytext=(input_data.shape[0] + 10, ylim[0] + .1))
        plt.gca().set_ylim(ylim)
        plt.draw()

        plt.tight_layout()
def plotOEM4strainsT(oemfilestrain1,strain1label,oemfilestrain2,strain2label,oemfilestrain3,strain3label,oemfilestrain4,strain4label,chromosome,showorigins,showreptime,reptimefile=''):
    
    # if showreptime is true, grab all replication times for the chromosome and bin the bases into earlyreptimebases and latereptimebases.
    if showreptime:
        with open(reptimefile) as f:
            reptimeforchrom = [line.strip().split('\t')[1:] for line in f.readlines() if line.strip().split('\t')[0]==chromosome]
        earlyreptimebases = [int(i[0]) for i in reptimeforchrom if float(i[1])<=30]
        latereptimebases = [int(i[0]) for i in reptimeforchrom if float(i[1])>30]    
    
    # Plot OEM for WT with origins if showorigins is True, otherwise plot OEM without showing confirmed or likely origins
    if showorigins:
        oem.plotOEM(oemfilestrain1,chromosome,True,411,strain1label,'green')
        plt.subplots_adjust(hspace=0.5)
        oem.plotOEM(oemfilestrain2,chromosome,True,412,strain2label,'blue')
        plt.subplots_adjust(hspace=0.5)
        oem.plotOEM(oemfilestrain3,chromosome,True,413,strain3label,'red')
        plt.subplots_adjust(hspace=0.5)
        oem.plotOEM(oemfilestrain4,chromosome,True,414,strain4label,'#FF7F00')
    else:
        oem.plotOEM(oemfilestrain1,chromosome,False,411,strain1label,'green')
        plt.subplots_adjust(hspace=0.5)
        oem.plotOEM(oemfilestrain2,chromosome,False,412,strain2label,'blue')
        plt.subplots_adjust(hspace=0.5)
        oem.plotOEM(oemfilestrain3,chromosome,False,413,strain3label,'red')
        plt.subplots_adjust(hspace=0.5)
        oem.plotOEM(oemfilestrain4,chromosome,False,414,strain4label,'#FF7F00')
    
    # if showreptime is true, plot vertical lines to indicate where the early and late replicating bases are
    if showreptime:
        for base in earlyreptimebases:
            plt.vlines(base,-1,1,colors='red')
        for base in latereptimebases:
            plt.vlines(base,-1,1,colors='black')  
    
    plt.show()
def show_fixed_lag_numberline():
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlim(0,10)
    ax.set_ylim(0,10)

    # draw lines
    xmin = 1
    xmax = 9
    y = 5
    height = 1

    plt.hlines(y, xmin, xmax)
    plt.vlines(xmin, y - height / 2., y + height / 2.)
    plt.vlines(4.5, y - height / 2., y + height / 2.)
    plt.vlines(6, y - height / 2., y + height / 2.)
    plt.vlines(xmax, y - height / 2., y + height / 2.)
    plt.vlines(xmax-1, y - height / 2., y + height / 2.)

    # add numbers
    plt.text(xmin, y-1.1, '$x_0$', fontsize=20, horizontalalignment='center')
    plt.text(xmax, y-1.1, '$x_k$', fontsize=20, horizontalalignment='center')
    plt.text(xmax-1, y-1.1, '$x_{k-1}$', fontsize=20, horizontalalignment='center')
    plt.text(4.5, y-1.1, '$x_{k-N+1}$', fontsize=20, horizontalalignment='center')
    plt.text(6, y-1.1, '$x_{k-N+2}$', fontsize=20, horizontalalignment='center')
    plt.text(2.7, y-1.1, '.....', fontsize=20, horizontalalignment='center')
    plt.text(7.2, y-1.1, '.....', fontsize=20, horizontalalignment='center')

    plt.axis('off')
    plt.show()
Example #7
def plot_percolation_data(gvals, m, M):
    for var in gvals:
        plt.title(var)
        if len(gvals[var].shape) == 1 and var != 'p':
            plt.vlines([m,M], 0, np.max(gvals[var]))
            plt.scatter(gvals['p'], gvals[var])
            plt.show()
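A synthetic-data sketch; the shapes are assumptions read off the code (1-D numpy arrays keyed by name, with the occupation probability stored under 'p'):

import numpy as np
import matplotlib.pyplot as plt

p = np.linspace(0, 1, 50)
gvals = {'p': p, 'largest_cluster': p**2}   # toy observable, purely illustrative
plot_percolation_data(gvals, m=0.4, M=0.6)  # the vlines bracket the window [m, M]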
def plot_read_len_frame_hist(meta_hist, rlrange, rlen_shifts, ibegin, iend, norm, color, fn_prefix):
    width=int(50/10.0*1.5)
    plt.figure(figsize=(width, 6))
    print "plotting read length frame distribution..."
    rlmin, rlmax = rlrange
    rlmin -= 3
    rlmax += 3
    ymax = 0
    for i in meta_hist:
        if i<ibegin or i>iend : continue
        x_rlen = np.arange(rlmin, rlmax+1)
        y_cnt = np.array([0.1]*len(x_rlen))
        for rlen, cnt in meta_hist[i].items():
            rlen += rlen_shifts[i%3]
            idx = rlen-rlmin
            y_cnt[idx] = cnt
        if norm == True:
            y_cnt /= np.mean(y_cnt[y_cnt!=0])
        y_cnt = np.log10(y_cnt)
        # plt.semilogy(rlen, cnt, color=color[i%3], lw=2, alpha=0.1)
        plt.plot(x_rlen, y_cnt, color=color[i%3], lw=2, alpha=0.1)
        ymax_loc = max(y_cnt)
        if ymax_loc > ymax:
            ymax = ymax_loc
    plt.vlines(range(1,rlmax,3), -1, ymax, colors='r', lw=2, alpha=0.3)
    plt.xlabel("read length")
    plt.ylabel("log footprint count")
    plt.xlim((rlmin,rlmax))
    plt.ylim((-1,ymax))
    xmin = rlmin - rlmin%5
    xmax = rlmax - rlmax%5
    plt.xticks(range(xmin, xmax+5, 5))
    # plt.show()
    plt.savefig("{0}_rlen_frame_hist.pdf".format(fn_prefix), bbox_inches='tight')
    plt.close()
    def plot_area_vs_energy(self, filename=None, show_save_energy=True):
        """
        Plot effective area vs. energy.
        """
        import matplotlib.pyplot as plt

        energy_hi = self.energy_hi.value
        effective_area = self.effective_area.value
        plt.plot(energy_hi, effective_area)
        if show_save_energy:
            plt.vlines(self.energy_thresh_hi.value, 1E3, 1E7, 'k', linestyles='--')
            plt.text(self.energy_thresh_hi.value - 1, 3E6,
                     'Safe energy threshold: {0:3.2f}'.format(
                         self.energy_thresh_hi),
                     ha='right')
            plt.vlines(self.energy_thresh_lo.value, 1E3, 1E7, 'k', linestyles='--')
            plt.text(self.energy_thresh_lo.value + 0.1, 3E3,
                     'Safe energy threshold: {0:3.2f}'.format(self.energy_thresh_lo))
        plt.xlim(0.1, 100)
        plt.ylim(1E3, 1E7)
        plt.loglog()
        plt.xlabel('Energy [TeV]')
        plt.ylabel('Effective Area [m^2]')
        if filename is not None:
            plt.savefig(filename)
            log.info('Wrote {0}'.format(filename))
Example #10
def display(params_estimated):

    # Construct matrix of experimental data and variance columns of interest
    exp_obs_norm = exp_data[data_names].view(float).reshape(len(exp_data), -1).T
    var_norm = exp_data[var_names].view(float).reshape(len(exp_data), -1).T
    std_norm = var_norm ** 0.5

    # Simulate model with new parameters and construct a matrix of the
    # trajectories of the observables of interest, normalized to 0-1.
    solver.run(params_estimated)
    obs_names_disp = obs_names + ['aSmac']
    sim_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
    totals = obs_totals + [momp_obs_total]
    sim_obs_norm = (sim_obs / totals).T

    # Plot experimental data and simulation on the same axes
    colors = ('r', 'b')
    for exp, exp_err, sim, c in zip(exp_obs_norm, std_norm, sim_obs_norm, colors):
        plt.plot(exp_data['Time'], exp, color=c, marker='.', linestyle=':')
        plt.errorbar(exp_data['Time'], exp, yerr=exp_err, ecolor=c,
                     elinewidth=0.5, capsize=0, fmt='none')  # modern matplotlib needs the string 'none'
        plt.plot(solver.tspan, sim, color=c)
    plt.plot(solver.tspan, sim_obs_norm[2], color='g')
    plt.vlines(momp_data[0], -0.05, 1.05, color='g', linestyle=':')
    plt.show()
Example #11
def _expand_dendrogram(cNode,swc_tree,off_x,off_y,radius,transform='plain') :
    global max_width,max_height # middle name d.i.r.t.y.
    '''
    Good old-fashioned recursion... sys.setrecursionlimit()!
    '''
    place_holder_h = H_SPACE
    max_degree = swc_tree.degree_of_node(cNode)
    required_h_space = max_degree * place_holder_h
    start_x = off_x-(required_h_space/2.0)
    if(required_h_space > max_width) :
        max_width = required_h_space
    
    if swc_tree.is_root(cNode) :
        print('i am expanding the root')
        cNode.children.remove(swc_tree.get_node_with_index(2))
        cNode.children.remove(swc_tree.get_node_with_index(3))
    
    for cChild in cNode.children :
        l = _path_between(swc_tree,cChild,cNode,transform=transform)
        r = cChild.content['p3d'].radius

        cChild_degree = swc_tree.degree_of_node(cChild)
        new_off_x = start_x + ( (cChild_degree/2.0)*place_holder_h )
        new_off_y = off_y+(V_SPACE*2)+l
        r = r if radius  else 1
        plt.vlines(new_off_x,off_y+V_SPACE,new_off_y,linewidth=r,colors=C)
        if((off_y+(V_SPACE*2)+l) > max_height) :
            max_height = off_y+(V_SPACE*2)+l

        _expand_dendrogram(cChild,swc_tree,new_off_x,new_off_y,radius=radius,transform=transform)

        start_x = start_x + (cChild_degree*place_holder_h)
        plt.hlines(off_y+V_SPACE,off_x,new_off_x,colors=C)
Example #12
def make_xi_plot():
	from numpy import linspace
	from matplotlib import pyplot as pl
	pl.rc('text', usetex=True)
	pl.rc('font', family='serif')
	lamdas = [-1,-0.9,-0.7,0,0.7,0.9,1]
	for lam in lamdas:
		plotx(lam,0,linestyle='k',minval=-0.9,maxval=10.0,logplot=True)
		plotx(lam,1,linestyle='k--',logplot=True)
		plotx(lam,2,logplot=True)
		plotx(lam,3,linestyle='k--',logplot=True)
	pl.vlines(x=1,ymin=-2,ymax=-0.076,color='k')
	pl.xlabel(r'$$\xi$$',fontsize=16)
	pl.ylabel(r'$$\tau$$',fontsize=16)
	pl.text(0.0, 0, r'$$M=0$$', bbox=dict(facecolor='white', alpha=1))
	pl.text(0.0, 1.4, r'$$M=1$$', bbox=dict(facecolor='white', alpha=1))
	pl.text(0, 2.48, r'$$M=2$$', bbox=dict(facecolor='white', alpha=1))
	pl.text(1.3, -1.5, r'hyperbolic')
	pl.text(0.3, -1.5, r'elliptic')
	pl.annotate(r'$$\lambda = 1$$', xy=(-0.29, -0.19), xytext=(-1, -1),
            arrowprops=dict(facecolor='black', shrink=0.15, width=1,headwidth=5),
            )
	pl.annotate(r'$$\lambda = -1$$', xy=(0.7, 0.4), xytext=(0.8,0.8),
            arrowprops=dict(facecolor='black', shrink=0.15, width=1,headwidth=5),
            )
	pl.xlim((-2,2))
	pl.ylim((-2,3))
Example #13
def _plotKmerFixed(min_limit, max_limit, kmer, output_name):
    """Old kmerplot, kept just in case...
    """
    Kmer_histogram = pd.io.parsers.read_csv("histogram.hist", sep='\t',
            header=None)
    Kmer_coverage = Kmer_histogram[Kmer_histogram.columns[0]].tolist()
    Kmer_count = Kmer_histogram[Kmer_histogram.columns[1]].tolist()
    Kmer_freq = [Kmer_coverage[i]*Kmer_count[i] for i in \
            range(len(Kmer_coverage))]
    #coverage peak, disregarding initial peak
    kmer_freq_peak = Kmer_freq.index(max(Kmer_freq[min_limit:max_limit]))
    kmer_freq_peak_value=max(Kmer_freq[min_limit:max_limit])

    xmax = max_limit
    ymax = kmer_freq_peak_value + (kmer_freq_peak_value*0.30)

    plt.plot(Kmer_coverage, Kmer_freq)
    plt.title("K-mer length = {}".format(kmer))
    plt.xlim((0,xmax))
    plt.ylim((0,ymax))
    plt.vlines(kmer_freq_peak, 0, kmer_freq_peak_value, colors='r',
            linestyles='--')
    plt.text(kmer_freq_peak, kmer_freq_peak_value+2000, str(kmer_freq_peak))
    plotname = "{}".format(output_name)
    plt.savefig(plotname)
    plt.clf()
    return 0
def plot_simp(n):
    xi, wi = qnwsimp(n+1, xmin, xmax)
    
    fig = plt.figure()
    plt.plot(x, f(x), linewidth=3, label=r'$f(x)$')
    
    for k in range(n//2):
        xii = xi[(2*k):(2*k+3)]
        xiii = linspace(xii[0], xii[2], 125)
        p = fitquad(xii)
        plt.fill_between(xiii, p(xiii), color='yellow')    
        if k==0:
            plt.plot(xiii, p(xiii),'r--', label=r'$\tilde{f}_{%d}(x)$' % (n+1))
        else:
            plt.plot(xiii, p(xiii),'r--')
    
    plt.vlines(xi, 0, f(xi),'k', linestyle=':')
    plt.hlines(0,xmin-0.1, xmax+0.1,'k',linewidth=2)
    plt.xlim(xmin-0.1, xmax+0.1)
    xtl = ['$x_{%d}$' % i for i in range(n+1)]
    xtl[0] += '=a'
    xtl[n] += '=b'
    plt.xticks(xi, xtl)
    plt.yticks([0],['0'])
    plt.legend()
    return fig
def funComputeRatioFromExperiment(dataRatioRGBY, dataRatioRGBL, valLevelBasis, valLevelTarget, dataTargetY, dataBasisY, titleText, figureName):
    '''
    The function first displays the results.
    Then it does some computations, because computations are always good.
    '''
    # do some interpolation
    interpInput, interpY       =  funInterpolationRatioDifferenceCurves(vecSearchLevel, dataRatioRGBY)
    interpInput, interpTargetY =  funInterpolationRatioDifferenceCurves(vecSearchLevel, dataTargetY)
    interpInput, interpBasisY  =  funInterpolationRatioDifferenceCurves(vecSearchLevel, dataBasisY)
    
    # figure to show the search of equivalent intensity between the patches
    fig = plt.figure()
    yMin = 0
    yMax = 4.2*np.max(interpTargetY)
    #print yMax

    # plot the differences and minimum
    plt.plot(interpInput, interpY[0,:],'r-', label="Y difference Red ")
    plt.plot(interpInput, interpY[1,:],'g-', label="Y difference Green")
    plt.plot(interpInput, interpY[2,:],'b-', label="Y difference Blue")

    # plot the measured intensity
    plt.plot(interpInput, interpBasisY[0,:],'r--', label="Y Red + basis ")
    plt.plot(interpInput, interpBasisY[1,:],'g--', label="Y Green + basis")
    plt.plot(interpInput, interpBasisY[2,:],'b--', label="Y Blue + basis")

    # plot the target patch who should stay stable
    plt.plot(interpInput, interpTargetY[0,:],'k-', label="Y target for red ")
    plt.plot(interpInput, interpTargetY[1,:],'k--',label="Y target for green")
    plt.plot(interpInput, interpTargetY[2,:],'k-', label="Y target for blue")

    # plot the minimum
    minDiffInterpRGB, indRGB = funGetSomeMinimumSingleCurve(interpY)
    plt.plot(indRGB[0], minDiffInterpRGB[0],'r^')
    plt.plot(indRGB[1], minDiffInterpRGB[1],'g^')
    plt.plot(indRGB[2], minDiffInterpRGB[2],'b^')
    plt.vlines(indRGB[0],0,minDiffInterpRGB[0], colors='r',linestyles='--')
    plt.vlines(indRGB[1],0,minDiffInterpRGB[1], colors='g',linestyles='--')
    plt.vlines(indRGB[2],0,minDiffInterpRGB[2], colors='b',linestyles='--')

    # plot the experiment information
    plt.vlines(valLevelBasis[0], yMin, yMax, colors='k', linestyles='--', label='Basis')
    plt.vlines(valLevelTarget, yMin, yMax, colors='k', linestyles='--', label='Target')
    plt.text(valLevelBasis[0], yMax*0.9,'Basis = '+repr(valLevelBasis[0]), ha="left",bbox = dict(boxstyle='round', fc="w", ec="k"))
    plt.text(valLevelTarget, yMax*0.8,'Target = '+repr(valLevelTarget), ha="left",bbox = dict(boxstyle='round', fc="w", ec="k"))

    plt.ylabel('Difference in Y')
    plt.xlabel('Digital input')
    plt.xlim(0,255)
    plt.title('Difference Curve for Ratio')
    plt.ylim(yMin, yMax)
    plt.title(titleText)
    #plt.legend(loc=2)
    plt.draw()
    plt.savefig(figureName)

    ratioRGB = np.zeros([3])
    ratioRGB[0], ratioRGB[1], ratioRGB[2] = indRGB[0], indRGB[1], indRGB[2]

    return ratioRGB
def graph_pdf(dailyReturns):
    kde = sm.nonparametric.KDEUnivariate(dailyReturns)
    kde.fit()
    v1 = len(kde.cdf[kde.cdf<0.05])
    var = np.array(dailyReturns)[np.array(dailyReturns) < np.percentile(dailyReturns,5)]
    print(var)
    plt.vlines(kde.support[v1],0,100,colors=c[4],lw=1.5,linestyles='-', label='VaR 5%', zorder=5)
    plt.vlines(np.mean(var),0,100,colors=c[4],lw=1.5,linestyles=':', label='Expected shortfall 5%', zorder=5)

    plt.plot(kde.support[v1:], kde.density[v1:], color=c[1], alpha=1,zorder=2)
    plt.plot(kde.support[:v1], kde.density[:v1], color=c[3], alpha=1,zorder=2)
    plt.fill_between(kde.support[v1:], kde.density[v1:], edgecolor="None", color=c[1], alpha=0.55,zorder=3)
    plt.fill_between(kde.support[:v1], kde.density[:v1], edgecolor="None", color=c[3], alpha=0.55,zorder=3)
    ax = plt.gca()
    w=0.025
    ax.set_xlim(1-w-0.0005, 1+w+0.0005)
    ax.set_ylim(-3, 79)
    ax.set_xticklabels(['-3%', '-2%', '-1%', '0%', '+1%','+2%','+3%'])
    ax.set_yticklabels([''])
    plt.legend()

    #plt.show()
    plt.savefig('probdf.pdf')

    exit()
Example #17
def plot_zrtt_treshold(data, output_path):
    threshold = 1
    gateways, zrtts = [], []
    for hop in data:
        ip, pais, zrtt = hop
        gateways.append(ip+"\n"+pais)
        zrtts.append(float(zrtt))
    gateways.reverse()
    zrtts.reverse()
    
    fig = plt.figure()
    y_pos = np.arange(len(gateways))
    plt.barh(y_pos, zrtts, align='center', alpha=0.4)
    plt.yticks(y_pos, gateways, horizontalalignment='right', fontsize=9)
    plt.title('ZRTTs for each hop')
    plt.xlabel('ZRTT')
    plt.ylabel('Hop')

    # Line at y=0
    plt.vlines(0, -1, len(gateways), alpha=0.4)

    # ZRTT threshold
    plt.vlines(threshold, -1, len(gateways), linestyle='--', color='b', alpha=0.4)
    plt.text(threshold, len(gateways) - 1, 'Threshold', rotation='vertical',
             verticalalignment='top', horizontalalignment='right')
    fig.set_size_inches(6, 9)
    plt.tight_layout() 
    plt.savefig(output_path, dpi=1000, bbox_inches='tight')
Example #18
def graphFrequency(parkingData, offset):

	parkingOccupancy = []
	for n in range (0, len(parkingData)):
		#print(parkingData[n])
		spotsTaken = 0
		for m in range (0, len(parkingData[n][0])):
			#print(parkingChanges[n][0][m])
			if parkingData[n][0][m] == 1:
				spotsTaken += 1
		parkingOccupancy.append((spotsTaken, (parkingData[n][1] - parkingData[0][1] + offset)/60/60/24))
		#print(parkingOccupancy[n])

	xcoordList = []
	ycoordList = []
	for n in range (0, len(parkingOccupancy)):
		ycoordList.append(parkingOccupancy[n][0])
		xcoordList.append(parkingOccupancy[n][1])
	plt.plot(xcoordList, ycoordList)
	day  = np.arange(0, int(xcoordList[len(xcoordList)-1]+2), 1)
	week = np.arange(0, int(xcoordList[len(xcoordList)-1]+2), 7)
	plt.vlines(day,  0, 15, 'black', 'dashed')
	plt.vlines(week, 0, 15)
	plt.ylabel('Spots Taken')
	plt.xlabel('Time (Days) (Midnight Monday = 0 days)')
	plt.title('Observed Data')
	plt.show()
Example #19
def graphSimples(simplesData, names, increment, title): # Simple data = [(spots #, time), (spots #, time) ...] such as (5, 0)

	hour  = np.arange(0, 604800 / increment, 3600 * 6 / increment)
	day   = np.arange(0, 604800 / increment, 86400 / increment)
	plt.vlines(hour,  0, 15, 'black', 'dashed')
	plt.vlines(day, 0, 15)

	for n in range(0, len(simplesData)):
		simpleData = simplesData[n]
		xcoordList = []
		ycoordList = []
		weeksFromEpoch = simpleData[0][1] // 604800  # integer weeks (floor division for Python 3)

		for i in range (0, len(simpleData)):
			ycoordList.append(simpleData[i][0])

			relativeTime = simpleData [i][1] - (weeksFromEpoch * 604800) # To nearest week
			xcoordList.append(relativeTime/increment)

		plotted, = plt.plot(xcoordList, ycoordList)
		plotted.set_label(names[n])

	plt.ylabel('Spots Taken')
	plt.xlabel('Time (Minutes) (Monday Midnight = 0 minutes)')
	plt.title(title)

	plt.legend()

	plt.show()
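A usage sketch for the documented (spots, epoch-seconds) tuples; the lot name and occupancy curve are made up, and the timestamps are aligned to a week boundary so the x-axis starts near zero:

import numpy as np
import matplotlib.pyplot as plt

week = 604800  # seconds per week
base = 1600000000 - 1600000000 % week  # align the toy series to a week boundary
lotA = [(int(5 + 5 * abs(np.sin(t * np.pi / 86400))), base + t) for t in range(0, week, 3600)]
graphSimples([lotA], names=['Lot A'], increment=60, title='Weekly occupancy')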
def plot(points, centroids):
    import itertools
    import matplotlib.pyplot as plt

    plt.scatter([point.getX() for point in points],
                [point.getY() for point in points],
                marker='o',
                color='grey'
                ) 
     
    centroids = list(itertools.chain.from_iterable(centroids)) # flatten centroids
    plt.scatter([point.getX() for point in centroids],
                [point.getY() for point in centroids],
                marker='x',
                linewidths=2
                )

    #plt.vlines([.67,1.24,1.72,3], -.02, .02) # hints_req
    #plt.vlines([.83,1.51,2.41,3.95], -.02, .02) # num_errors
    #plt.vlines([2.3,5.2,9,15.5], -.02, .02) # minSpent
    #plt.vlines([.618,.658,.728,.825], -.02, .02) # IncCor
    #plt.vlines([.0195,.043,.109,.211], -.02, .02) # IncHint
    #plt.vlines([.116,.144,.203,.296], -.02, .02) # IncInc
    plt.vlines([.015,.036,.067,.157], -.02, .02) # NumBOH


    
    plt.show()
def plot_results():
    from matplotlib import pyplot as plt

    global benchmarks

    n = 10000

    bar_labels = ['serial', '2', '3', '4', '6']

    fig = plt.figure(figsize=(10, 8))

    # plot bars
    y_pos = np.arange(len(benchmarks))
    plt.yticks(y_pos, bar_labels, fontsize=16)
    bars = plt.barh(y_pos, benchmarks,
                    align='center', alpha=0.4, color='g')

    # annotation and labels

    for ba, be in zip(bars, benchmarks):
        plt.text(ba.get_width() + 1.4, ba.get_y() + ba.get_height() / 2,
                 '{0:.2%}'.format(benchmarks[0] / be),
                 ha='center', va='bottom', fontsize=11)

    plt.xlabel('time in seconds for n=%s' % n, fontsize=14)
    plt.ylabel('number of processes', fontsize=14)
    t = plt.title('Serial vs. Multiprocessing via Parzen-window estimation', fontsize=18)
    plt.ylim([-1, len(benchmarks) + 0.5])
    plt.xlim([0, max(benchmarks) * 1.1])
    plt.vlines(benchmarks[0], -1, len(benchmarks) + 0.5, linestyles='dashed')
    plt.grid()

    plt.show()
Example #22
    def plot(self, logscale=True, with_validation=True, xlim=None, ylim=None):
        """
        Plots the loss history.
        :param logscale: if True, logarithmic scale is used
        :param with_validation: if True, validation set loss is plotted
        :param ylim: limits of the y-axis
        """
        if 'figsize' in dir(plt):
            plt.figsize(10, 5)
        if hasattr(plt, 'hold'):  # plt.hold was removed in matplotlib 3.0
            plt.hold(True)
        try:
            if logscale:
                plt.yscale('log')
                # plt.xscale('log')
            plt.plot(self.history[0], self.history[1], 'b')
            if with_validation:
                plt.plot(self.history[0], self.history[2], 'c')
            plt.plot(self.history[0], self.history[3], 'r')
        except ValueError:
            # catches: ValueError: Data has no positive values, and therefore can not be log-scaled.
            # when no data is present or we only have NaNs
            pass
        if xlim is not None:
            plt.xlim(xlim)
        if ylim is not None:
            plt.ylim(ylim)
        yl = plt.ylim()
        if with_validation and self.best_iter is not None:
            plt.vlines(self.best_iter, yl[0], yl[1])
        plt.xlabel('iteration')
        plt.ylabel('loss')
        if with_validation:
            plt.legend(['training', 'validation', 'test'])
        else:
            plt.legend(['training', 'test'])
Example #23
def plot_unique_by_date(alignment_summaries, metadata):
    plt.figure(figsize=(8, 5.5))
    df_meta = pd.read_csv(metadata, index_col=0, parse_dates=True)  # DataFrame.from_csv was removed from pandas
    df_meta['Date Produced'] = pd.to_datetime(df_meta['Date Produced'])

    alndata = []
    for summary in alignment_summaries:
        alndata.append(simpleseq.sam.get_alignment_metadata(summary))

    unique = pd.Series(np.array([s['uniq_rate'] for s in alndata]),
                       index=alignment_summaries)

    # plot unique alignments
    index = df_meta.index.intersection(unique.index)
    order = df_meta.loc[index].sort_values(by='Date Produced', ascending=False).index
    left = np.arange(len(index))
    height = unique.loc[order]
    width = 0.9
    plt.barh(left, height, width)
    plt.yticks(left + 0.5, order, fontsize=10)
    ymin, ymax = 0, len(left)
    plt.ylim((ymin, ymax))
    plt.xlabel('percentage')
    plt.title('comparative alignment summary')
    plt.ylabel('time (descending)')

    # plot klein in-drop line
    plt.vlines(unique['Klein_in_drop'], ymin, ymax, color='indianred', linestyles='--')

    sns.despine()
    plt.tight_layout()
Example #24
    def plot(self):
        global TESTING, UNITS, HEIGHTS
        '''Prepares the data using self.prepare_data and then
        graphs the data on a plot.'''
        self.prepare_data()

        plt.plot(HEIGHTS, self.data)
        plt.hlines(self.significant_shear, 0, HEIGHTS[-1])
        plt.vlines(self.significant_shear_height, -1, 2)
        print('Significant shear at image {0}'.format(self.x_significant_shear))
        if not TESTING:
            print('Theoretical significant shear at height {0} {1}'.format(self.significant_shear_height, UNITS))

        plt.ylim([-1, 2])
        plt.xlim([HEIGHTS[0], HEIGHTS[-1]])
        
        plt.xlabel('Height ({0})'.format(UNITS))
        plt.ylabel('Coverage')
        plt.title(self.dp_path.split('/')[-1])
        
        try:
            os.mkdir('{0}/res'.format(self.dp_path))
        except OSError:  # the directory may already exist
            pass
        plt.savefig('{0}/res/results.png'.format(self.dp_path))
        with open('{0}/res/results.txt'.format(self.dp_path), 'w') as f:
            global MODE
            f.write('{0}\nMODE {1}\n'.format(str(self.significant_shear_height), MODE))
Example #25
def estPath(values):
    """estimates path
    Args:
       values: dict of x and y
    Returns:
       alphas: regularization lambdas
       coefs: coef matrix for features and alphas
    """
    X,y = values["x"], values["y"]
    alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
    return alphas,coefs

    # NOTE: the code below never runs; it sits after the return above
    print(alphas)
    print(coefs)
    print(coefs[:, 1])
    exit(1)
    xx = np.sum(np.abs(coefs.T), axis=1)
    xx /= xx[-1]

    plt.plot(xx, coefs.T)
    ymin, ymax = plt.ylim()
    plt.vlines(xx, ymin, ymax, linestyle='dashed')
    plt.xlabel('|coef| / max|coef|')
    plt.ylabel('Coefficients')
    plt.title('LASSO Path')
    plt.axis('tight')
    plt.show()
    plt.savefig("larspath.png")
    def plot_result(self, list_of_res, extra_txt='', dir_path='', big_figure=True, gui=False):
        if not gui or self.show:
            if big_figure:
                plt.figure(figsize=(10, 14))
            else:
                plt.figure()

        cpt = 1
        color = ['b', 'r', 'g', 'm', 'c', 'y', 'k']
        for key in list_of_res:
            plt.subplot(len(list_of_res.keys()), 1, cpt)
            plt.plot(list_of_res[key], color[cpt%len(color)], label=key)
            plt.ylabel(key, rotation=0)
            plt.ylim(-0.2, 1.2)
            for i in range(len(list_of_res['gnd_truth'])):
                if list_of_res['gnd_truth'][i-1] != list_of_res['gnd_truth'][i]:
                    plt.vlines(i, -0.2, len(list_of_res)*1.2+0.2, 'b', '--')
            cpt += 1

        plt.tight_layout()

        if self.save:
            plt.savefig(dir_path + 'result' + extra_txt + self.ext_img, dpi=100)

        if self.show:
            plt.show()
        else:
            if gui:
                plt.clf()
            else:
                plt.close()
Example #27
def plot_assumption_free(scores, data, bins=50):
    """
    Plots the scores from the analysis using the assumption free algorithm.
    """
    plt.figure()
    plt.subplot(2, 1, 1)
    (data.acc / data.acc.max()).plot()
    (data.hr / data.hr.max()).plot()
    data.ratio_log.plot()
    plt.legend(loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(data.index[:len(scores)], scores)

    scores = [x for x in scores if abs(x) > 10 ** -10]
    s_mean, sigma = norm.fit(scores)
    plt.figure()
    plt.hist(scores, bins=50, density=True)  # 'normed' was removed from matplotlib
    plt.plot(bins, norm.pdf(bins, loc=s_mean, scale=sigma))
    vlin = linspace(s_mean - 3 * sigma, s_mean + 3 * sigma, 13)
    step = int(256 / ((len(vlin) - 1) / 2))
    colors = linspace(0, 1, 256)[::step][:(len(vlin) - 1) // 2]
    colors = [(c, 0, 0) for c in colors]
    colors += [(1, 1, 1)]
    # mirror the red ramp as a green ramp (the original iterated over whole tuples)
    colors += [(0, r, 0) for (r, _, _) in reversed(colors[:-1])]
    plt.vlines(vlin.tolist()[1:], 0, 1, colors[1:])
def plotRaster(xsg, ax=None, height=1.):
    """Creates raster plot from a merged or unmerged XSG dictionary.
    Note that the dictionary has to have the key 'spikeTimes', which is 
    generated by detectSpikes().  The value of this key is a single numpy array
    with spike locations in samples (or a list of such arrays).

    Note that we plot these based on the size of the traces themselves.
    This works because we split up our acquisitions, but in general, 
    we might want to only plot regions of the raster.  plt.xlim() should
    be able to be used post-hoc for this.

    :param: - xsg - a merged or unmerged XSG dictionary with a 'spikeTimes' entry
    :param: - ax - optional, a matplotlib axis to plot onto
    :param: - height - optional, spacing for the rasters
    """
    if ax is None:
        ax = plt.gca() # otherwise it'll plot on the top figure

    try:
        if type(xsg['spikeTimes']) is list:
            for trial, trace in enumerate(xsg['spikeTimes']):
                plt.vlines(trace, trial, trial+height)
            plt.ylim(len(xsg['spikeTimes']), 0)
            plt.xlim(0,float(xsg['ephys']['chan0'].shape[0]) / xsg['sampleRate'][0] * 1000.0)

        else:
            plt.vlines(xsg['spikeTimes'], 0, height)
            plt.ylim((0,1))
            plt.xlim(0,float(xsg['ephys']['chan0'].shape[0]) / xsg['sampleRate'] * 1000.0)

        plt.xlabel('time (ms)')
        plt.ylabel('trials')

    except:
        print('No spike times found!')
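A self-contained sketch of the expected dictionary; the spike times are assumed to be in milliseconds here so they land on the millisecond x-axis computed above:

import numpy as np
import matplotlib.pyplot as plt

xsg = {
    'spikeTimes': [np.sort(np.random.uniform(0, 1000, 20)) for _ in range(3)],  # 3 trials
    'ephys': {'chan0': np.zeros(10000)},  # one second of samples...
    'sampleRate': [10000],                # ...at 10 kHz -> a 1000 ms x-range
}
plotRaster(xsg)
plt.show()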
def analyse_given_data_set(data):
    # Confirm that data has been read and output properties of file
    number_of_players = len(data)
    print "Data file read with %s players" % number_of_players

    # Calculating mean of guesses
    first_guess_mean = sum([e[1] for e in data]) / number_of_players
    print "Mean of the guess: %s so 2/3rds of mean is: %s" % (first_guess_mean, 2 * first_guess_mean / 3)

    first_guess_distance = [abs(e[1] - 2 * first_guess_mean / 3)for e in data]
    winning_first_guess = data[first_guess_distance.index(min(first_guess_distance))][1]
    print "Winning guess: %s" % winning_first_guess

    # Display winner
    print "The winning user name(s) are/is:"
    for e in data:
        if e[1] == winning_first_guess:
            print "\t" + e[0]
            print "\t\t" + e[0] + " guessed " + e[4] + " time(s) with the last guess on the " + e[3] + " (with url: " + e[2] + ")"

    # Plot histograms of guesses using matplotlib
    plt.figure()
    plt.hist([e[1] for e in data], bins=20, label='Guess', density=True)
    plt.title("Two thirds of the average game ($N=%s$)." % number_of_players)
    plt.xlabel("Guess")
    plt.ylabel("Probability")
    max_y = plt.ylim()[1]
    plt.vlines(winning_first_guess, 0, max_y, label='Winning Guess: %s' % winning_first_guess, color='blue')
    plt.ylim(0, max_y)
    plt.xlim(0, 100)
    plt.legend()
    plt.savefig("Results_for_webapp.png")
def allDirectionalityRatios(ratioFunction):
    """
    A simple plot which calculates all directionality ratios, plots them
    and puts lines at 20 top highly expressed genes (Supp figure from our paper)
    This is mostly matplotlib code.
    """
    if not os.path.exists("savedHeatmaps"):
        os.mkdir("savedHeatmaps")
    wildRatio = np.log(ratioFunction("Wildtype_0min_BglII_rep1"))
    for j, dataset in enumerate(datasets):
        ax = plt.subplot(len(datasets), 1, j + 1)
        curRatio = (ratioFunction(dataset))
        plt.title("{1},  r = {0:.2f}, p={2:.2e}".format(pearsonr(curRatio, wildRatio)[0], names[dataset],
                                                      pearsonr(curRatio, wildRatio)[1]), fontsize=10)
        plt.tick_params(axis='both', which='major', labelsize=10)
        plt.tick_params(axis='both', which='minor', labelsize=8)
        plt.plot(curRatio)
        plt.ylim((0.25, 0.75))
        plt.xlim((0, len(curRatio)))
        #plt.ylim((0, 1))
        plt.yticks((0.25, 0.5, 0.75))
        geneCoor = [1162773, 3509071, 1180887, 543099, 1953250, 2522439, 3328524, 1503879, 900483, 242693, 3677144, 3931680, 3677704, 3762707, 3480870, 3829656, 1424678, 901855, 1439056, 3678537]
        genePos = [i / 10000. for i in geneCoor]
        #genePos = []
        for lpos in genePos:
            plt.vlines(lpos , -.8, .8, alpha=0.2, linewidth=1, color="black")
        plt.xticks([0, 50, 100, 150, 200, 250, 300, 350, 400], ["" for i in range(9)], fontsize=98)
        removeAxes(ax=ax)
        plt.subplots_adjust(0.07, 0.05, 0.94, 0.95, 0.2, 0.5)



    plt.show()
    exit()
Example #31
		print "usage: python cmd.py passby_fn gtfs_dir [stop_id [direction_id [service_id]]]"
		exit()

	passby_fn = sys.argv[1]
	gtfs_dir = sys.argv[2]

	if len(sys.argv)>3:
		stop_id = sys.argv[3]
	else:
		stop_id = None

	if len(sys.argv)>4:
		direction_id = sys.argv[4]
	else:
		direction_id = None

	if len(sys.argv)>5:
		service_id = sys.argv[5]
	else:
		service_id = None


	passby_secs, scheduled_secs = generate_schedule(passby_fn, gtfs_dir, stop_id, direction_id, service_id)

	from matplotlib import pyplot as plt

	plt.vlines( passby_secs, 0, 1.5, lw=0.05 )
	plt.vlines( scheduled_secs, 0.5, 2, color="red" )

	plt.show()
Example #32
"""
print(__doc__)

# Author: Fabian Pedregosa <*****@*****.**>
#         Alexandre Gramfort <*****@*****.**>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from mrex import linear_model
from mrex import datasets

X, y = datasets.load_diabetes(return_X_y=True)

print("Computing regularization path using the LARS ...")
_, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)

xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]

plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
Example #33
pitch_code = '[3,3,3,3,3,3,3,5,1,2,3]'
pitch_v = get_chroma_pitch(pitch_code)
onsets_base_frames = onsets_base_frames(code, h)
onsets_base_frames[-1] = onsets_base_frames[-1] - 1
print(onsets_base_frames)
print(np.diff(onsets_base_frames))
print(pitch_v)
v0 = 0
for i, v in enumerate(onsets_base_frames[1:]):
    print("v0,v is {},{},{}".format(v0, v, pitch_v[i]))
    if v == 337:
        print('=====')
    for f in range(v0, v):
        if img.item(pitch_v[i], f) == 0.5:
            img.itemset((pitch_v[i], f), 1)
        else:
            img.itemset((pitch_v[i], f), 0.8)
    v0 = v

start_point = 0.02
ds = onsets_base(code, time, start_point)
print("ds is {}".format(ds))
plt.vlines(ds, 0, sr, color='b', linestyle='solid')
librosa.display.specshow(img, x_axis='time', cmap='coolwarm')
#librosa.display.specshow(chromagram, x_axis='time',  cmap='coolwarm')
plt.show()
def main(frame_rate):
    frame_count = 0
    now = time.time()
    bitrate_data = {}
    once = False
    with subprocess.Popen(
        [
            "ffprobe",
            #"-threads", f"{cores-1}",
            "-show_entries",
            "packet=size,duration_time,pts_time,flags",
            "-select_streams",
            f"{spec}:{args.index}",
            "-print_format",
            "json=compact=1",
            args.input
        ],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL) as proc_frame:
        for stdout_line in iter(proc_frame.stdout.readline, ""):
            stdout_line = stdout_line.decode("utf-8").replace("\r\n",
                                                              "").strip()

            if len(stdout_line) == 0:
                break
            if len(stdout_line) > 0 and stdout_line[-1] == ",":
                stdout_line = stdout_line[:-1]
            if "pts_time" in stdout_line:
                try:
                    decoded = dec_json.loads(stdout_line)
                except json.decoder.JSONDecodeError:
                    print(stdout_line)
                    raise Exception
                if not once and spec == "a":
                    frame_rate = 1.0 / float(decoded.get('duration_time'))
                    once = True

                frame_type = decoded.get("flags") if spec == "V" else "A"
                if frame_type == "K_":
                    frame_type = "I"
                else:
                    frame_type = "P"
                frame_bitrate = (float(decoded.get('size')) * 8 /
                                 1000) * frame_rate
                frame_time = float(decoded.get("pts_time"))
                frame = (frame_time, frame_bitrate)
                if frame_type not in bitrate_data:
                    bitrate_data[frame_type] = []
                bitrate_data[frame_type].append(frame)
                frame_count += 1
                if total_time is not None:
                    percent = (frame_time / total_time) * 100.0
                    sys.stdout.write("\rProgress: {:5.2f}%".format(percent))
    print(flush=True)

    print(f"Done gathering data: Taken {time.time() - now:.4f}s")
    print("Drawing matplot...")
    matplot.figure().canvas.set_window_title(args.input)
    matplot.title(f"{os.path.basename(args.input)}")
    matplot.xlabel("Time (sec)")
    matplot.ylabel("Frame Bitrate (kbit/s)")
    matplot.grid(True)
    # map frame type to color
    frame_type_color = {
        # audio
        'A': 'red',
        # video
        'I': 'red',
        'P': 'green',
        'B': 'blue'
    }
    global_peak_bitrate = 0.0
    global_mean_bitrate = 0.0

    for frame_type in ['I', 'P', 'B', 'A']:

        # skip frame type if missing
        if frame_type not in bitrate_data:
            continue

        # convert list of tuples to numpy 2d array
        frame_list = bitrate_data[frame_type]
        frame_array = numpy.array(frame_list)

        # update global peak bitrate
        peak_bitrate = frame_array.max(0)[1]
        if peak_bitrate > global_peak_bitrate:
            global_peak_bitrate = peak_bitrate

        # update global mean bitrate (using piecewise mean)
        mean_bitrate = frame_array.mean(0)[1]
        global_mean_bitrate += mean_bitrate * (len(frame_list) / frame_count)

        # plot chart using gnuplot-like impulses
        matplot.vlines(frame_array[:, 0], [0],
                       frame_array[:, 1],
                       color=frame_type_color[frame_type],
                       label="{} Frames".format(frame_type))

    # set y-axis limits if requested
    if args.min:
        matplot.ylim(ymin=args.min)
    if args.max:
        matplot.ylim(ymax=args.max)

    # calculate peak line position (left 15%, above line)
    peak_text_x = matplot.xlim()[1] * 0.15
    peak_text_y = global_peak_bitrate + \
                  ((matplot.ylim()[1] - matplot.ylim()[0]) * 0.015)
    peak_text = "peak ({:.0f})".format(global_peak_bitrate)

    # draw peak as thick black line w/ text
    matplot.axhline(global_peak_bitrate, linewidth=2, color='black')
    matplot.text(peak_text_x,
                 peak_text_y,
                 peak_text,
                 horizontalalignment='center',
                 fontweight='bold',
                 color='black')

    # calculate mean line position (right 85%, above line)
    mean_text_x = matplot.xlim()[1] * 0.85
    mean_text_y = global_mean_bitrate + \
                  ((matplot.ylim()[1] - matplot.ylim()[0]) * 0.015)
    mean_text = "mean ({:.0f})".format(global_mean_bitrate)

    # draw mean as thick black line w/ text
    matplot.axhline(global_mean_bitrate, linewidth=2, color='black')
    matplot.text(mean_text_x,
                 mean_text_y,
                 mean_text,
                 horizontalalignment='center',
                 fontweight='bold',
                 color='black')

    matplot.legend()

    # render graph to file (if requested) or screen
    if args.output:
        matplot.savefig(args.output, format=args.format)
    else:
        matplot.show()
Example #35
                           left_index=True,
                           right_index=True,
                           how='left')
        if i % count == 0:
            j += 1
            k = 1
        else:
            k += 1

print(all)
print(all.mean())
# Grouped boxplot
b = sns.boxplot(data=all)
sns.lineplot(x=range(len(all.columns)), y=90)
#sns.lineplot(x=range(len(all.columns)), y=10)
"""
plt.vlines(3.5,ymax=len(all),ymin=0)
plt.vlines(7.5,ymax=len(all),ymin=0)
plt.vlines(11.5,ymax=len(all),ymin=0)
plt.vlines(15.5,ymax=len(all),ymin=0)
plt.vlines(19.5,ymax=len(all),ymin=0)
"""
plt.vlines(4.5, ymax=len(all), ymin=0)
plt.vlines(9.5, ymax=len(all), ymin=0)
plt.vlines(14.5, ymax=len(all), ymin=0)
plt.vlines(19.5, ymax=len(all), ymin=0)

b.set_xlabel("X Label", fontsize=10)
b.set_ylabel("Y Label", fontsize=15)
b.set_title('Non-ADHD', fontsize=18)
plt.ylim([0, 300])
Example #36
import matplotlib.pyplot as plt
import numpy as np
import math


def square(x):
    return x * x


def f(vin):
    c = 2.2e-8
    r = 33.0
    l = 1e-6
    q = (vin * l /
         math.sqrt(square(r * (1 - square(vin) * l * c)) + square(vin * l)))
    return q


x = np.arange(5000000, 12000000, 1000)  # Python 3: no 'L' suffix on integer literals
y = list(map(f, x))  # map() is lazy in Python 3; materialize for plotting

plt.plot((x / (2 * math.pi)) / 1000, y)

plt.vlines(1045, 0.2, 1)
plt.tick_params(axis='both', which='major', labelsize=24)

plt.title("Theoretical response of our Band-pass Filter", fontsize=36)
plt.xlabel("Frequency (kHz)", fontsize=36)
plt.ylabel("Filter gain", fontsize=36)
plt.show()
Example #37

def satview(h):
    # horizon half-angle seen from altitude h: a = arccos(R / (R + h))
    a = np.pi * 0.5 - np.arcsin(R / (R + h))
    # solid angle of the visible spherical cap; times R**2 gives its surface area
    o = 4 * np.pi * np.sin(a * 0.5)**2
    return o * R**2


mansee = satview(H)  # ca 72 km**2
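# Small-height limit (assuming R above is the Earth radius in metres): for h << R
# the cap area tends to 2*pi*R*h, so an eye height near 1.8 m gives
# 2*pi*6.371e6*1.8 ~ 72 km**2, matching the value noted for mansee.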

Hiss = 370.0e3  # ISS, Hubble
Hsun = 800.0e3  # sun-syncro satellites, science obs, Iridium satellites (flares in the sky)
Hmeo = 20.2e6  # GNSS
Hgeo = 35.786e6  #

Hbm = 29.0e6  # distance for Blue marble
Hma = 405.4e6  # Moon apogee
Hl1 = 1.4811e11  # Lagrangian point L1 Earth-Sun system
H = np.logspace(np.log10(1000), np.log10(Hbm), 1000)

plt.figure()
plt.plot(H, satview(H) / Se)
plt.vlines([Hiss, Hsun, Hmeo, Hbm], ymin=0.0, ymax=0.5)
plt.grid()

H = np.logspace(np.log10(10000), np.log10(Hl1), 1000)

plt.figure()
plt.semilogx(H, satview(H) / Se)
plt.vlines([Hiss, Hsun, Hmeo, Hgeo, Hbm, Hma, Hl1], ymin=0.0, ymax=0.5)
plt.grid()
Example #38
def analyzeSound(soundfile, outlist, plot=True, crm=True, tms=True, xml=False):
    var = {}
    # load soundfile
    y, sr = librosa.load(soundfile)
    var['y'] = y
    var['sr'] = sr
    # analyze onsets
    o_env = librosa.onset.onset_strength(y, sr=sr)
    times = librosa.frames_to_time(np.arange(len(o_env)), sr=sr)
    onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=sr)
    var['onset_frames'] = onset_frames
    var['times'] = times
    if plot:
        plt.figure(figsize=(18, 8))
        ax1 = plt.subplot(2, 1, 1)
        librosa.display.waveplot(y[:])
        plt.title('Waveshape')
        plt.subplot(2, 1, 2, sharex=ax1)
        plt.plot(times, o_env, label='Onset strength')
        plt.vlines(times[onset_frames],
                   0,
                   o_env.max(),
                   color='r',
                   alpha=0.9,
                   linestyle='--',
                   label='Onsets')
        plt.axis('tight')
        plt.legend(frameon=True, framealpha=0.75)
    p = None
    if crm:
        chroma = librosa.feature.chroma_stft(y=y, sr=sr)
        var['chroma'] = chroma
        nseq = []
        for i in range(onset_frames.shape[0] - 1):
            nseq.append(np.argwhere(chroma[:, onset_frames[i]] == 1.0)[0, 0])
        var['nseq'] = PCSet(nseq, UNI=False, ORD=False)
        if plot:
            plt.figure(figsize=(18, 4))
            librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
            plt.colorbar()
            plt.title('Chromagram')
            plt.tight_layout()
        idx = np.argwhere(chroma == 1.0)
        p = np.histogram(idx[:, 0], 12)
        var['prob'] = np.asarray(p[0] / np.sum(p[0]))
        if plot:
            c = np.array([
                'C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B'
            ])
            plt.figure(figsize=(6, 4))
            plt.bar(c, p[0], width=0.8)
    if plot: plt.show()
    tempo = None
    if tms:
        tempo = librosa.beat.tempo(onset_envelope=o_env, sr=sr)
        beat = librosa.frames_to_time(onset_frames, sr=sr)
        beat = RHYTHMSeq((np.diff(beat) * 16).round(0) / 16, REF='e')
        var['beat'] = beat
        var['tempo'] = int(tempo[0])
        if plot: beat.displayRhythm(xml)
    output = []
    for out in outlist:
        output.append(var[out])
    return (output)
    def changepoint_date_analysis(self, search=None):
        self.reset_plot()

        model = self.create_model()
        
        # Use past self.training_years years of data
        train = self.stock[self.stock['Date'] > (self.max_date - pd.DateOffset(years = self.training_years)).date()]
        model.fit(train)
        
        # Predictions of the training data (no future periods)
        future = model.make_future_dataframe(periods=0, freq='D')
        future = model.predict(future)
    
        train = pd.merge(train, future[['ds', 'yhat']], on = 'ds', how = 'inner')
        
        changepoints = model.changepoints
        train = train.reset_index(drop=True)
        
        # Create dataframe of only changepoints
        change_indices = []
        for changepoint in (changepoints):
            change_indices.append(train[train['ds'] == changepoint.date()].index[0])
        
        c_data = train.loc[change_indices, :]  # .ix was removed from pandas
        deltas = model.params['delta'][0]
        
        c_data['delta'] = deltas
        c_data['abs_delta'] = abs(c_data['delta'])
        
        # Sort the values by maximum change
        c_data = c_data.sort_values(by='abs_delta', ascending=False)

        # Limit to 10 largest changepoints
        c_data = c_data[:10]

        # Separate into negative and positive changepoints
        cpos_data = c_data[c_data['delta'] > 0]
        cneg_data = c_data[c_data['delta'] < 0]

        # Changepoints and data
        if not search:
        
            print('\nChangepoints sorted by slope rate of change (2nd derivative):\n')
            print(c_data.loc[:, ['Date', 'Adj. Close', 'delta']][:5])

            # Line plot showing actual values, estimated values, and changepoints
            self.reset_plot()
            
            # Set up line plot 
            plt.plot(train['ds'], train['y'], 'ko', ms = 4, label = 'Stock Price')
            plt.plot(future['ds'], future['yhat'], color = 'navy', linewidth = 2.0, label = 'Modeled')
            
            # Changepoints as vertical lines
            plt.vlines(cpos_data['ds'].dt.to_pydatetime(), ymin = min(train['y']), ymax = max(train['y']), 
                       linestyles='dashed', color = 'darkgreen', 
                       linewidth= 1.2, label='Positive Changepoints')

            plt.vlines(cneg_data['ds'].dt.to_pydatetime(), ymin = min(train['y']), ymax = max(train['y']), 
                       linestyles='dashed', color = 'r', 
                       linewidth= 1.2, label='Negative Changepoints')

            plt.legend(prop={'size':10}) 
            plt.xlabel('Date')
            plt.ylabel('Price (VNĐ)')
            plt.title('Stock Price with Changepoints')
            plt.show()
        
        # Search for search term in google news
        # Show related queries, rising related queries
        # Graph changepoints, search frequency, stock price
        if search:
            date_range = ['%s %s' % (str(min(train['Date']).date()), str(max(train['Date']).date()))]

            # Get the Google Trends for specified terms and join to training dataframe
            trends, related_queries = self.retrieve_google_trends(search, date_range)

            if (trends is None)  or (related_queries is None):
                print('No search trends found for %s' % search)
                return

            print('\n Top Related Queries: \n')
            print(related_queries[search]['top'].head())

            print('\n Rising Related Queries: \n')
            print(related_queries[search]['rising'].head())

            # Upsample the data for joining with training data
            trends = trends.resample('D').sum()

            trends = trends.reset_index(level=0)
            trends = trends.rename(columns={'date': 'ds', search: 'freq'})

            # Interpolate the frequency
            trends['freq'] = trends['freq'].interpolate()

            # Merge with the training data
            train = pd.merge(train, trends, on = 'ds', how = 'inner')

            # Normalize values
            train['y_norm'] = train['y'] / max(train['y'])
            train['freq_norm'] = train['freq'] / max(train['freq'])
            
            self.reset_plot()

            # Plot the normalized stock price and normalize search frequency
            plt.plot(train['ds'], train['y_norm'], 'k-', label = 'Stock Price')
            plt.plot(train['ds'], train['freq_norm'], color='goldenrod', label = 'Search Frequency')

            # Changepoints as vertical lines
            plt.vlines(cpos_data['ds'].dt.to_pydatetime(), ymin = 0, ymax = 1, 
                       linestyles='dashed', color = 'darkgreen', 
                       linewidth= 1.2, label='Positive Changepoints')

            plt.vlines(cneg_data['ds'].dt.to_pydatetime(), ymin = 0, ymax = 1, 
                       linestyles='dashed', color = 'r', 
                       linewidth= 1.2, label='Negative Changepoints')

            # Plot formatting
            plt.legend(prop={'size': 10})
            plt.xlabel('Date')
            plt.ylabel('Normalized Values')
            plt.title('%s Stock Price and Search Frequency for %s' % (self.symbol, search))
            plt.show()
mark = []
for j in peaks:
    mark.append(j * hop_size)
    mark.append(j * hop_size + window_size)

for i in range(0, len(mark), 2):
    text_file_1.write('%06.3f' % (mark[i] * 0.0000625) + "\t" + '%06.3f' %
                      (mark[i + 1] * 0.0000625) + "\t" + "Vowel" + "\n")

#######################################################################################################################
# What : Plotting the peaks and segments
plt.plot(st_energy, 'red', label='Short term energy')
# plt.plot(st_energy_1, 'blue')
plt.vlines(peaks[0],
           min(st_energy),
           max(st_energy),
           'green',
           label='Peak',
           linestyles='dashed')
for j in peaks:
    plt.vlines(j, min(st_energy), max(st_energy), 'green', linestyles='dashed')
# plt.vlines(segment_boundary[0], min(st_energy), max(st_energy), 'black', label='Segment boundary')
# for j in segment_boundary:
#     plt.vlines(j, min(st_energy), max(st_energy), 'black')
plt.ylabel('Magnitude')
plt.xlabel('Frame Number')
plt.text(1400,
         10,
         'No of peaks : ' + str(len(peaks)),
         bbox={
             'facecolor': 'red',
             'alpha': 0.5,
Example #41
import obspy
import matplotlib.pyplot as plt
# Client comes from obspy's ArcLink module in older releases (removed in obspy >= 1.3)
from obspy.signal.trigger import recursive_sta_lta, trigger_onset, plot_trigger


# Retrieve waveforms via ArcLink
client = Client(host="erde.geophysik.uni-muenchen.de", port=18001,
                user="******")
t = obspy.UTCDateTime("2009-08-24 00:19:45")
st = client.get_waveforms('BW', 'RTSH', '', 'EHZ', t, t + 50)

# For convenience
tr = st[0]  # only one trace in mseed volume
df = tr.stats.sampling_rate

# Characteristic function and trigger onsets
cft = recursive_sta_lta(tr.data, int(2.5 * df), int(10. * df))
on_of = trigger_onset(cft, 3.5, 0.5)

# Plotting the results
ax = plt.subplot(211)
plt.plot(tr.data, 'k')
ymin, ymax = ax.get_ylim()
plt.vlines(on_of[:, 0], ymin, ymax, color='r', linewidth=2)
plt.vlines(on_of[:, 1], ymin, ymax, color='b', linewidth=2)
plt.subplot(212, sharex=ax)
plt.plot(cft, 'k')
plt.hlines([3.5, 0.5], 0, len(cft), color=['r', 'b'], linestyle='--')
plt.axis('tight')
plt.show()

plot_trigger(tr, cft, 3.5, 0.5, show=True)
Example #42
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter

a = [ 110, 220, 440, 880, 1760, 3520, 7040, 14080 ]
b = [ 110, 165.0, 247.5, 371.25, 556.875, 835.3125, 1252.96875, 1879.453125, 2819.1796875, 4228.76953125, 6343.154296875, 9514.7314453125, 14272.097167969 ]


plt.figure(figsize=(13,8))
plt.grid(True, 'both')
plt.xscale('log')
plt.gca().xaxis.set_major_formatter(FormatStrFormatter('%.d'))

plt.vlines(a, 0, 1, 'b')
plt.vlines(b, 0, 1, 'g')

for i, x in enumerate(a):
  plt.text(x, 1.1, "A ({} Hz)".format(x), rotation=90, fontsize=12)

c = ['A', 'E', 'B', 'F#', 'C#', 'Ab', 'Eb', 'Bb', 'F', 'C', 'G', 'D', 'A??']
for i, x in enumerate(b):
  plt.text(x, -0.15, '{} ({} Hz)'.format(c[i], round(x, 3)), rotation=90, verticalalignment='top', fontsize=12)

plt.tight_layout()
plt.savefig('./class1/comma.png', dpi=300)
Example #43
     infile = open(filename, 'r')  # Open file for reading
     line = infile.readline()      # Read first line
     # Read x and y coordinates from the file and store in lists   
     Zahlen[i] = []
     for line in infile:
         words = line.split()      # Split line into words     
         Zahlen[i].append(float(words[0]))
         
     infile.close()
    

plt.ylim(0.5,3*10**3)
plt.yscale('log')
plt.plot(x,y,".",color = "blue",label = "active measurements")
plt.plot(x_d,y_d,".",color = "gray",label = "deactivated measurements")
plt.plot(x,f,color ="darkred", label = "Model")
for i in range(Anzahl[0]):
    plt.plot(x,Zahlen[i],label = Dateinamen[i])
plt.vlines(Literatur_FEP2, ymin = 10, ymax = 500,label = "Literature FEP2 1332.5 keV",color = "lime")
plt.vlines(Literatur_FEP1, ymin = 10, ymax = 500,label = "Literature FEP1 1173.2 keV",color = "lime",linestyle = "--")
plt.vlines(Compton2, ymin = 10, ymax = 500,label = "Compton edge FEP2")
plt.vlines(Compton1, ymin = 10, ymax = 500, linestyle = "--",label = "Compton edge FEP1")
plt.vlines(60, ymin = 80, ymax = 1000,label = "Lead X-ray emission")
plt.vlines(Backscatter2, ymin = 100, ymax = 1000,color = "red",label = "Backscatter edge FEP2")
plt.vlines(Backscatter1, ymin = 100, ymax = 1000,color = "red", linestyle = "--",label = "Backscatter edge FEP1")

plt.legend(loc='lower left',ncol=2,prop={'size': 7})
plt.xlabel("Energie [keV]")
plt.ylabel("Zählungen")
plt.savefig(Probe + ".png")
plt.show()
    def evaluate_prediction(self, start_date=None, end_date=None, nshares = None):
        
        # Default start date is one year before end of data
        # Default end date is end date of data
        if start_date is None:
            start_date = self.max_date - pd.DateOffset(years=1)
        if end_date is None:
            end_date = self.max_date
            
        start_date, end_date = self.handle_dates(start_date, end_date)
        
        # Training data starts self.training_years years before start date and goes up to start date
        train = self.stock[(self.stock['Date'] < start_date.date()) & 
                           (self.stock['Date'] > (start_date - pd.DateOffset(years=self.training_years)).date())]
        
        # Testing data is specified in the range
        test = self.stock[(self.stock['Date'] >= start_date.date()) & (self.stock['Date'] <= end_date.date())]
        
        # Create and train the model
        model = self.create_model()
        model.fit(train)
        
        # Make a future dataframe and predictions
        future = model.make_future_dataframe(periods = 365, freq='D')
        future = model.predict(future)
        
        # Merge predictions with the known values
        test = pd.merge(test, future, on = 'ds', how = 'inner')

        train = pd.merge(train, future, on = 'ds', how = 'inner')
        
        # Calculate the differences between consecutive measurements
        test['pred_diff'] = test['yhat'].diff()
        test['real_diff'] = test['y'].diff()
        
        # Correct is when we predicted the correct direction
        test['correct'] = (np.sign(test['pred_diff']) == np.sign(test['real_diff'])) * 1
        
        # Accuracy when we predict increase and decrease
        increase_accuracy = 100 * np.mean(test[test['pred_diff'] > 0]['correct'])
        decrease_accuracy = 100 * np.mean(test[test['pred_diff'] < 0]['correct'])

        # Calculate mean absolute error
        test_errors = abs(test['y'] - test['yhat'])
        test_mean_error = np.mean(test_errors)

        train_errors = abs(train['y'] - train['yhat'])
        train_mean_error = np.mean(train_errors)

        # Calculate percentage of time actual value within prediction range
        test['in_range'] = False

        # .loc replaces the long-removed pandas .ix indexer
        for i in test.index:
            if (test.loc[i, 'y'] < test.loc[i, 'yhat_upper']) & (test.loc[i, 'y'] > test.loc[i, 'yhat_lower']):
                test.loc[i, 'in_range'] = True

        in_range_accuracy = 100 * np.mean(test['in_range'])

        if not nshares:

            # Date range of predictions
            print('\nPrediction Range: {} to {}.'.format(start_date.date(),
                end_date.date()))

            # Final prediction vs actual value
            print('\nPredicted price on {} = VNĐ{:.2f}.'.format(max(future['ds']).date(), future['yhat'].iloc[-1]))
            print('Actual price on    {} = VNĐ{:.2f}.\n'.format(max(test['ds']).date(), test['y'].iloc[-1]))

            print('Average Absolute Error on Training Data = VNĐ{:.2f}.'.format(train_mean_error))
            print('Average Absolute Error on Testing  Data = VNĐ{:.2f}.\n'.format(test_mean_error))

            # Direction accuracy
            print('When the model predicted an increase, the price increased {:.2f}% of the time.'.format(increase_accuracy))
            print('When the model predicted a  decrease, the price decreased  {:.2f}% of the time.\n'.format(decrease_accuracy))

            print('The actual value was within the {:d}% confidence interval {:.2f}% of the time.'.format(int(100 * model.interval_width), in_range_accuracy))


             # Reset the plot
            self.reset_plot()
            
            # Set up the plot
            fig, ax = plt.subplots(1, 1)

            # Plot the actual values
            ax.plot(train['ds'], train['y'], 'ko-', linewidth = 1.4, alpha = 0.8, ms = 1.8, label = 'Observations')
            ax.plot(test['ds'], test['y'], 'ko-', linewidth = 1.4, alpha = 0.8, ms = 1.8, label = '_nolegend_')  # same style; skip duplicate legend entry
            
            # Plot the predicted values
            ax.plot(future['ds'], future['yhat'], 'navy', linewidth = 2.4, label = 'Predicted') 

            # Plot the uncertainty interval as ribbon
            ax.fill_between(future['ds'].dt.to_pydatetime(), future['yhat_upper'], future['yhat_lower'], alpha = 0.6, 
                           facecolor = 'gold', edgecolor = 'k', linewidth = 1.4, label = 'Confidence Interval')

            # Put a vertical line at the start of predictions
            plt.vlines(x=min(test['ds']).date(), ymin=min(future['yhat_lower']), ymax=max(future['yhat_upper']), colors = 'r',
                       linestyles='dashed', label = 'Prediction Start')

            # Plot formatting
            plt.legend(loc = 2, prop={'size': 8})
            plt.xlabel('Date')
            plt.ylabel('Price VNĐ')
            plt.grid(linewidth=0.6, alpha = 0.6)
                       
            plt.title('{} Model Evaluation from {} to {}.'.format(self.symbol,
                start_date.date(), end_date.date())) 
            plt.show() 

        
        # If a number of shares is specified, play the game
        elif nshares:
            
            # Only playing the stocks when we predict the stock will increase
            test_pred_increase = test[test['pred_diff'] > 0]
            
            test_pred_increase.reset_index(inplace=True)
            prediction_profit = []
            
            # Iterate through all the predictions and calculate profit from playing
            for i, correct in enumerate(test_pred_increase['correct']):
                
                # If we predicted up and the price goes up, we gain the difference
                if correct == 1:
                    prediction_profit.append(nshares * test_pred_increase.loc[i, 'real_diff'])
                # If we predicted up and the price goes down, real_diff is
                # negative, so the same expression records the loss
                else:
                    prediction_profit.append(nshares * test_pred_increase.loc[i, 'real_diff'])
            
            test_pred_increase['pred_profit'] = prediction_profit
            
            # Put the profit into the test dataframe
            test = pd.merge(test, test_pred_increase[['ds', 'pred_profit']], on = 'ds', how = 'left')
            test.loc[0, 'pred_profit'] = 0
        
            # Profit for either method at all dates
            test['pred_profit'] = test['pred_profit'].cumsum().ffill()
            test['hold_profit'] = nshares * (test['y'] - float(test['y'].iloc[0]))
            
            # Display information
            print('You played the stock market in {} from {} to {} with {} shares.\n'.format(
                self.symbol, start_date.date(), end_date.date(), nshares))
            
            print('When the model predicted an increase, the price increased {:.2f}% of the time.'.format(increase_accuracy))
            print('When the model predicted a  decrease, the price decreased  {:.2f}% of the time.\n'.format(decrease_accuracy))

            # Display some friendly information about the perils of playing the stock market
            print('The total profit using the Prophet model = VNĐ{:.2f}.'.format(np.sum(prediction_profit)))
            print('The Buy and Hold strategy profit =         VNĐ{:.2f}.'.format(float(test['hold_profit'].iloc[-1])))
            print('\nThanks for playing the stock market!\n')
            
           
            
            # Plot the predicted and actual profits over time
            self.reset_plot()
            
            # Final strategy profits, used for locating the text labels
            final_profit = test['pred_profit'].iloc[-1]
            final_smart = test['hold_profit'].iloc[-1]

            # Text location
            last_date = test['ds'].iloc[-1]
            text_location = (last_date - pd.DateOffset(months = 1)).date()

            plt.style.use('dark_background')

            # Plot smart profits
            plt.plot(test['ds'], test['hold_profit'], 'b',
                     linewidth = 1.8, label = 'Buy and Hold Strategy') 

            # Plot prediction profits
            plt.plot(test['ds'], test['pred_profit'], 
                     color = 'g' if final_profit > 0 else 'r',
                     linewidth = 1.8, label = 'Prediction Strategy')

            # Display final values on graph
            plt.text(x = text_location, 
                     y =  final_profit + (final_profit / 40),
                     s = 'VNĐ%d' % final_profit,
                    color = 'g' if final_profit > 0 else 'r',
                    size = 18)
            
            plt.text(x = text_location, 
                     y =  final_smart + (final_smart / 40),
                     s = 'VNĐ%d' % final_smart,
                    color = 'g' if final_smart > 0 else 'r',
                    size = 18) 

            # Plot formatting
            plt.ylabel('Profit (VNĐ)')
            plt.xlabel('Date')
            plt.title('Predicted versus Buy and Hold Profits') 
            plt.legend(loc = 2, prop={'size': 10}) 
            plt.grid(alpha=0.2)  
            plt.show()
# %%
window = 7
title = "Daily confirmed number of positive patients by gender ({} day rolling ave)".format(
    window)
ax = df_by_gender[['M', 'F'
                   ]].rolling(window).mean().plot(title=title,
                                                  grid=True,
                                                  cmap=plt.get_cmap("tab10"))
ax.legend(bbox_to_anchor=(0.05, 1), loc='upper left')
ax.xaxis.set_major_locator(mdates.MonthLocator())

reference_dates = pd.to_datetime(
    ['2020-02-04', '2020-03-03', '2020-03-31', '2020-04-28'])
plt.ylabel("Confirmed number of positive patients")
plt.vlines(reference_dates, 0, 100, "red", linestyles='dashed')
plt.savefig(os.path.join('plots', title))
# plt.show()

# %%
df_by_age = df_tokyo_log.groupby(['公表_年月日',
                                  '患者_年代']).count()['都道府県名'].reset_index()
df_by_age = df_by_age.rename(columns={
    '公表_年月日': 'date',
    '患者_年代': 'age',
    '都道府県名': 'count'
})
df_by_age = df_by_age.pivot_table('count', 'date', 'age').fillna(0)
df_by_age = df_by_age.rename(
    columns={
        '10代': '15',
plt.hlines(y=[3.29, 3.39, 3.50],
           xmin=0,
           xmax=1750,
           lw=1,
           linestyles='dashdot',
           label='sleep-in')
for i in range(len(Same_DOD_Catalogs)):
    plt.hlines(y=Same_DOD_Catalogs[i][2],
               xmin=0,
               xmax=1750,
               lw=0.8,
               linestyles='dotted',
               label='sleep-in')
    plt.vlines(x=Same_DOD_Catalogs[i][0],
               ymin=2.75,
               ymax=4,
               lw=0.8,
               linestyles='dotted',
               label='sleep-in')
    ax1.scatter(Same_DOD_Catalogs[i][0],
                Same_DOD_Catalogs[i][2],
                c='red',
                s=10)
plt.grid()

# row:2 column:1 lower
plotarea_x_max = 6.62273456
ax2 = fig.add_subplot(212,
                      xlabel='discharge [Wh]',
                      ylabel='Voltage [V]',
                      xlim=(0, plotarea_x_max),
                      ylim=(2.75, 4.2))
Exemple #47
0
import sys
import matplotlib.pyplot as plt

f = open(sys.argv[1])
spikes = {}
first = True
for line in f:
    if first:
        first = False
    else:
        line = line.rstrip()
        (time, number) = line.split(",")
        try:
            spikes[number].append(float(time))
        except KeyError:
            spikes[number] = [float(time)]

# Python 3: dict.items() replaces the Python 2-only iteritems()
for number, times in spikes.items():
    plt.vlines(times, int(number) - 0.45, int(number) + 0.45)

plt.show()
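For reference, the script expects a comma-separated file whose first line is a header, with one spike time and unit id per line; a made-up example:

# spikes.csv (hypothetical input)
# time,unit
# 0.013,0
# 0.021,3
# 0.034,0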
Exemple #48
0
def pvdiagram(self,
              outname,
              data=None,
              header=None,
              ax=None,
              outformat='pdf',
              color=True,
              cmap='Blues',
              vmin=None,
              vmax=None,
              vsys=0,
              contour=True,
              clevels=None,
              ccolor='k',
              pa=None,
              vrel=False,
              logscale=False,
              x_offset=False,
              ratio=1.2,
              prop_vkep=None,
              fontsize=14,
              lw=1,
              clip=None,
              plot_res=True,
              inmode='fits',
              xranges=[],
              yranges=[],
              ln_hor=True,
              ln_var=True,
              alpha=None,
              colorbar=False,
              cbaroptions=('right', '3%', '0%', r'(Jy beam$^{-1}$)')):
    '''
    Draw a position-velocity (PV) diagram.

    Args:
        outname (str): Output file name, without the extension.
    '''

    # Modules
    import copy
    import matplotlib as mpl
    from mpl_toolkits.axes_grid1.inset_locator import inset_axes

    # format
    formatlist = np.array(['eps', 'pdf', 'png', 'jpeg'])

    # properties of plots
    #mpl.use('Agg')
    plt.rcParams[
        'font.family'] = 'Arial'  # font (Times New Roman, Helvetica, Arial)
    plt.rcParams[
        'xtick.direction'] = 'in'  # directions of x ticks ('in'), ('out') or ('inout')
    plt.rcParams[
        'ytick.direction'] = 'in'  # directions of y ticks ('in'), ('out') or ('inout')
    plt.rcParams['font.size'] = fontsize  # fontsize

    def change_aspect_ratio(ax, ratio):
        '''
        Change the aspect ratio of a figure.

        Parameters:
            ax: Axes object (matplotlib.pyplot.subplots())
            ratio: float or int
                Relative x-axis width compared to the y-axis width.
        '''
        aspect = (1 / ratio) * (ax.get_xlim()[1] - ax.get_xlim()[0]) / (
            ax.get_ylim()[1] - ax.get_ylim()[0])
        aspect = np.abs(aspect)
        aspect = float(aspect)
        ax.set_aspect(aspect)

    # output file
    if (outformat == formatlist).any():
        outname = outname + '.' + outformat
    else:
        print('ERROR\tpvdiagram: Outformat is wrong.')
        return

    # Input
    if inmode == 'data':
        if data is None:
            print("inmode ='data' is selected. data must be provided.")
            return
        naxis = len(data.shape)
    else:
        data = np.squeeze(self.data)
        header = self.header
        naxis = self.naxis

    # figures
    if ax:
        pass
    else:
        fig = plt.figure(figsize=(11.69, 8.27))  # figsize=(11.69,8.27)
        ax = fig.add_subplot(111)

    # Read
    xaxis = self.xaxis.copy()
    vaxis = self.vaxis.copy()
    delx = self.delx
    delv = self.delv
    nx = len(xaxis)
    nv = len(vaxis)

    # Beam
    bmaj, bmin, bpa = self.beam

    if self.res_off:
        res_off = self.res_off
    else:
        # Resolution along offset axis
        if self.pa:
            pa = self.pa

        if pa:
            # an ellipse of the beam
            # (x/bmin)**2 + (y/bmaj)**2 = 1
            # y = x*tan(theta)
            # --> solve to get resolution in the direction of pv cut with P.A.=pa
            del_pa = pa - bpa
            del_pa = del_pa * np.pi / 180.  # radian
            term_sin = (np.sin(del_pa) / bmin)**2.
            term_cos = (np.cos(del_pa) / bmaj)**2.
            res_off = np.sqrt(1. / (term_sin + term_cos))
        else:
            res_off = bmaj

    # relative velocity or LSRK
    offlabel = r'$\mathrm{Offset\ (arcsec)}$'
    if vrel:
        vaxis = vaxis - vsys
        vlabel = r'$\mathrm{Relative\ velocity\ (km\ s^{-1})}$'
        vcenter = 0
    else:
        vlabel = r'$\mathrm{LSR\ velocity\ (km\ s^{-1})}$'
        vcenter = vsys

    # set the extent of the image
    offmin = xaxis[0] - delx * 0.5
    offmax = xaxis[-1] + delx * 0.5
    velmin = vaxis[0] - delv * 0.5
    velmax = vaxis[-1] + delv * 0.5

    # set axes
    if x_offset:
        extent = (offmin, offmax, velmin, velmax)
        xlabel = offlabel
        ylabel = vlabel
        hline_params = [vsys, offmin, offmax]
        vline_params = [0., velmin, velmax]
        res_x = res_off
        res_y = delv
    else:
        data = data.T
        extent = (velmin, velmax, offmin, offmax)
        xlabel = vlabel
        ylabel = offlabel
        hline_params = [0., velmin, velmax]
        vline_params = [vcenter, offmin, offmax]
        res_x = delv
        res_y = res_off

    # set colorscale
    if vmax:
        pass
    else:
        vmax = np.nanmax(data)

    # logscale
    if logscale:
        norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax)
    else:
        norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)

    # clip data at some value
    data_color = copy.copy(data)
    if clip:
        data_color[np.where(data < clip)] = np.nan

    # plot images
    if color:
        imcolor = ax.imshow(data_color,
                            cmap=cmap,
                            origin='lower',
                            extent=extent,
                            norm=norm,
                            alpha=alpha)

        # color bar
        if colorbar:
            cbar_loc, cbar_wd, cbar_pad, cbar_lbl = cbaroptions
            if cbar_loc != 'right':
                print('WARNING\tpvdiagram: only right is supported for \
					colorbar location. Your input is ignored.')
            axin_cb = inset_axes(
                ax,
                width=cbar_wd,
                height='100%',
                loc='lower left',
                bbox_to_anchor=(1.0 + float(cbar_pad.strip('%')) * 0.01, 0.,
                                1., 1.),
                bbox_transform=ax.transAxes,
                borderpad=0)
            cbar = plt.colorbar(imcolor, cax=axin_cb)
            cbar.set_label(cbar_lbl)
            #divider = make_axes_locatable(ax)
            #cax     = divider.append_axes(cbar_loc, size=cbar_wd, pad=cbar_pad)
            #cbar    = plt.colorbar(imcolor, cax=cax )#, ax = ax, orientation=cbar_loc, aspect=float(cbar_wd), pad=float(cbar_pad))
            #cbar.set_label(cbar_lbl)

    if contour:
        imcont = ax.contour(data,
                            colors=ccolor,
                            origin='lower',
                            extent=extent,
                            levels=clevels,
                            linewidths=lw,
                            alpha=alpha)

    # axis labels
    ax.set_xlabel(xlabel, fontsize=fontsize)
    ax.set_ylabel(ylabel, fontsize=fontsize)

    # set xlim, ylim
    if len(xranges) == 0:
        ax.set_xlim(extent[0], extent[1])
    elif len(xranges) == 2:
        xmin, xmax = xranges
        ax.set_xlim(xmin, xmax)
    else:
        print('WARNING: Input xranges is wrong. Must be [xmin, xmax].')
        ax.set_xlim(extent[0], extent[1])

    if len(yranges) == 0:
        ax.set_ylim(extent[2], extent[3])
    elif len(yranges) == 2:
        ymin, ymax = yranges
        ax.set_ylim(ymin, ymax)
    else:
        print('WARNING: Input yranges is wrong. Must be [ymin, ymax].')
        ax.set_ylim(extent[2], extent[3])

    # lines showing offset 0 and relative velocity 0
    if ln_hor:
        xline = plt.hlines(hline_params[0],
                           hline_params[1],
                           hline_params[2],
                           ccolor,
                           linestyles='dashed',
                           linewidths=1.)
    if ln_var:
        yline = plt.vlines(vline_params[0],
                           vline_params[1],
                           vline_params[2],
                           ccolor,
                           linestyles='dashed',
                           linewidths=1.)

    ax.tick_params(which='both',
                   direction='in',
                   bottom=True,
                   top=True,
                   left=True,
                   right=True,
                   pad=9,
                   labelsize=fontsize)

    # plot resolutions
    if plot_res:
        # x axis
        #print (res_x, res_y)
        res_x_plt, res_y_plt = ax.transLimits.transform(
            (res_x * 0.5, res_y * 0.5)) - ax.transLimits.transform(
                (0, 0))  # data --> Axes coordinate
        ax.errorbar(0.1,
                    0.1,
                    xerr=res_x_plt,
                    yerr=res_y_plt,
                    color=ccolor,
                    capsize=3,
                    capthick=1.,
                    elinewidth=1.,
                    transform=ax.transAxes)

    # aspect ratio
    if ratio:
        change_aspect_ratio(ax, ratio)

    # save figure
    plt.savefig(outname, transparent=True)

    return ax
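A hypothetical invocation, assuming `pv` is an instance of the (unshown) class that provides this method:

ax = pv.pvdiagram('pv_out', outformat='pdf', color=True, contour=True,
                  vsys=5.0, vrel=True, colorbar=True)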
y, sr = librosa.load(sample)
#y_harmonic, y_percussive = librosa.effects.hpss(y)
tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
print(beats.shape)
onset_env = librosa.onset.onset_strength(y=y, sr=sr, aggregate=np.median)
hop_length = 512
plt.figure(figsize=(8, 4))
times = librosa.frames_to_time(np.arange(len(onset_env)),
                               sr=sr,
                               hop_length=hop_length)
plt.plot(times, librosa.util.normalize(onset_env), label='Onset strength')
plt.vlines(times[beats],
           0,
           1,
           alpha=0.5,
           color='r',
           linestyle='--',
           label='Beats')
print(times[beats])

#### madmom
#proc = BeatTrackingProcessor(fps=100)
#proc = bt.DBNBeatTrackingProcessor(fps=100)
#act = bt.RNNBeatProcessor()(sample2)
proc = dbt.DBNDownBeatTrackingProcessor(beats_per_bar=[4, 4], fps=120)
act = dbt.RNNDownBeatProcessor()(sample)
plt.vlines(proc(act),
           0,
           1,
           alpha=0.5,
Exemple #50
0
# mcmc
mcmc = pm.MCMC([p_A, p_B, delta, data_A, data_B])
mcmc.sample(25000, 5000)

p_A_samples = mcmc.trace("p_A")[:]
p_B_samples = mcmc.trace("p_B")[:]
delta_samples = mcmc.trace("delta")[:]

ax = plt.subplot(311)

# Ploting the posteriors

plt.xlim(0, .1)
plt.hist(p_A_samples, histtype='stepfilled', bins=25, alpha=0.85,
         label="posterior of $p_A$", color="#A60628", normed=True)
plt.vlines(true_p_A, 0, 80, linestyle="--", label="true $p_A$ (unknown)")
plt.legend(loc="upper right")
plt.title("Posterior distributions of $p_A$, $p_B$, and delta unknowns")

ax = plt.subplot(312)

plt.xlim(0, .1)
plt.hist(p_B_samples, histtype='stepfilled', bins=25, alpha=0.85,
         label="posterior of $p_B$", color="#467821", normed=True)
plt.vlines(true_p_B, 0, 80, linestyle="--", label="true $p_B$ (unknown)")
plt.legend(loc="upper right")

ax = plt.subplot(313)
plt.hist(delta_samples, histtype='stepfilled', bins=30, alpha=0.85,
         label="posterior of delta", color="#7A68A6", normed=True)
plt.vlines(true_p_A - true_p_B, 0, 60, linestyle="--",
Exemple #51
0
           ],
           fontsize=10)
plt.title("Standard Normal Distribution", fontsize=20)
plt.ylabel("pdf", fontsize=15)
plt.show()
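The `left`/`right` bounds used below were defined in a part of the example that was cut off; plausible stand-in values (an assumption) would be:

left, right = -1.96, 1.96  # assumed two-sided 95% bounds of the standard normal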

x = np.linspace(-5, 5, 1000)

y = stats.norm.cdf(x)

plt.figure(figsize=(12, 8))
plt.margins(x=0, y=0)
plt.plot(x, y, color="black", linewidth=2)
plt.vlines(x=[left, right],
           ymin=0,
           ymax=[stats.norm.cdf(left),
                 stats.norm.cdf(right)],
           linestyle="--")
plt.hlines(y=[stats.norm.cdf(left),
              stats.norm.cdf(right)],
           xmin=-5,
           xmax=[left, right],
           linestyle="--")
plt.grid()
plt.xticks(np.arange(-4, 5, 1),
           labels=[
               "-4σ=-4", "-3σ=-3", "-2σ=-2", "-1σ=-1", "μ=0", "1σ=1", "2σ=2",
               "3σ=3", "4σ=4"
           ],
           fontsize=15)
plt.yticks(np.arange(0, 1.1, 0.05), fontsize=10)
Exemple #52
0
def nested_sampling_results(ns_object, burnin=0.4, bins=None, save=False,
                            output_dir='/', plot=False):
    """ Shows the results of the Nested Sampling, summary, parameters with 
    errors, walk and corner plots.
    """
    res = ns_object
    nsamples = res.samples.shape[0]
    indburnin = int(np.percentile(np.array(range(nsamples)), burnin * 100))

    print(res.summary())

    print(
        '\nNatural log of prior volume and Weight corresponding to each sample')
    if save or plot:
        plt.figure(figsize=(12, 4))
        plt.subplot(1, 2, 1)
        plt.plot(res.logvol, '.', alpha=0.5, color='gray')
        plt.xlabel('samples')
        plt.ylabel('logvol')
        plt.vlines(indburnin, res.logvol.min(), res.logvol.max(),
                   linestyles='dotted')
        plt.subplot(1, 2, 2)
        plt.plot(res.weights, '.', alpha=0.5, color='gray')
        plt.xlabel('samples')
        plt.ylabel('weights')
        plt.vlines(indburnin, res.weights.min(), res.weights.max(),
                   linestyles='dotted')
        # Save before show(): with non-interactive backends, saving after
        # show() can write out an empty figure
        plt.savefig(output_dir+'Nested_results.pdf')
        if plot:
            plt.show()
            
        print("\nWalk plots before the burnin")
        show_walk_plot(np.expand_dims(res.samples, axis=0))
        if burnin > 0:
            print("\nWalk plots after the burnin")
            show_walk_plot(np.expand_dims(res.samples[indburnin:], axis=0))
        plt.savefig(output_dir+'Nested_walk_plots.pdf')
        
    mean, cov = nestle.mean_and_cov(res.samples[indburnin:],
                                    res.weights[indburnin:])
    print("\nWeighted mean +- sqrt(covariance)")
    print("Radius = {:.3f} +/- {:.3f}".format(mean[0], np.sqrt(cov[0, 0])))
    print("Theta = {:.3f} +/- {:.3f}".format(mean[1], np.sqrt(cov[1, 1])))
    print("Flux = {:.3f} +/- {:.3f}".format(mean[2], np.sqrt(cov[2, 2])))

    if save:
        with open(output_dir+'Nested_sampling.txt', "w") as f:
            f.write('#################################\n')
            f.write('####   CONFIDENCE INTERVALS   ###\n')
            f.write('#################################\n')
            f.write(' \n')
            f.write('Results of the NESTED SAMPLING fit\n')
            f.write('----------------------------------\n ')
            f.write(' \n')
            f.write("\nWeighted mean +- sqrt(covariance)\n")
            f.write("Radius = {:.3f} +/- {:.3f}\n".format(mean[0], np.sqrt(cov[0, 0])))
            f.write("Theta = {:.3f} +/- {:.3f}\n".format(mean[1], np.sqrt(cov[1, 1])))
            f.write("Flux = {:.3f} +/- {:.3f}\n".format(mean[2], np.sqrt(cov[2, 2])))
                        
    if bins is None:
        bins = int(np.sqrt(res.samples[indburnin:].shape[0]))
        print("\nHist bins =", bins)
    
    if save or plot:
        ranges = None
        fig = corner.corner(res.samples[indburnin:], bins=bins,
                            labels=["$r$", r"$\theta$", "$f$"],
                            weights=res.weights[indburnin:], range=ranges,
                            plot_contours=True)
        fig.set_size_inches(8, 8)
    if save:
        plt.savefig(output_dir+'Nested_corner.pdf')
            
    print('\nConfidence intervals')
    if save or plot:
        _ = confidence(res.samples[indburnin:], cfd=68, bins=bins,
                       weights=res.weights[indburnin:],
                       gaussian_fit=True, verbose=True, save=False)
                   
    if save:
        plt.savefig(output_dir+'Nested_confi_hist_flux_r_theta_gaussfit.pdf')

    final_res = np.array([[mean[0], np.sqrt(cov[0, 0])],
                          [mean[1], np.sqrt(cov[1, 1])],
                          [mean[2], np.sqrt(cov[2, 2])]])
    return final_res                     
plt.plot(peaks, x[peaks], "x")
plt.show()

# Especially for noisy signals peaks can be easily grouped by their
# prominence (see `peak_prominences`). E.g. we can select all peaks except
# for the mentioned QRS complexes by limiting the allowed prominence to 0.6.

peaks, properties = find_peaks(x, prominence=(None, 0.6))
properties["prominences"].max()
# 0.5049999999999999
plt.plot(x)
plt.plot(peaks, x[peaks], "x")
plt.show()

# And finally let's examine a different section of the ECG which contains
# beat forms of different shape. To select only the atypical heart beats we
# combine two conditions: a minimal prominence of 1 and width of at least 20
# samples.

x = electrocardiogram()[17000:18000]
peaks, properties = find_peaks(x, prominence=1, width=20)
properties["prominences"], properties["widths"]
# (array([1.495, 2.3  ]), array([36.93773946, 39.32723577]))
plt.plot(x)
plt.plot(peaks, x[peaks], "x")
plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"],
           ymax = x[peaks], color = "C1")
plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"],
           xmax=properties["right_ips"], color = "C1")
plt.show()
Exemple #54
0
    q1, m, q3 = np.percentile(month.tolist(), [25, 50, 75], axis=0)
    quartile1.append(q1)
    medians.append(m)
    quartile3.append(q3)

whiskers = np.array([
    adjacent_values(sorted_array, q1, q3)
    for sorted_array, q1, q3 in zip(ages, quartile1, quartile3)
])
whiskersMin, whiskersMax = whiskers[1:, 0], whiskers[1:, 1]

inds = np.arange(0, len(medians))
plt.scatter(inds[1:], medians[1:], marker='o', color='white', s=30, zorder=3)
plt.vlines(inds[1:],
           quartile1[1:],
           quartile3[1:],
           color='k',
           linestyle='-',
           lw=5)
plt.vlines(inds[1:], whiskersMin, whiskersMax, color='k', linestyle='-', lw=1)

plt.axis([0, len(ages), 0, max_age])
plt.xticks(np.arange(labels.size), labels, rotation=60)
plt.xlabel("Calendar Month")
plt.ylabel(f"{args['issue_type']} Age (days)", labelpad=10)

# Show the grid lines as dark grey lines
plt.grid(axis='y',
         visible=True,
         which='major',
         color='#666666',
         linestyle='-',
def PDFm(nest,
         mu=0,
         sigma=1,
         data=0,
         outlier=0,
         distribuition='normal',
         grid=False,
         points=False):
    """
    Returns a generic plot from PDF of a selected distribuition based on PDFm discretization.

    Parameters
    ----------
    nest: int
        The number of estimation points.
    mu: int, optional
        Specifies the mean of distribuition.
        Defaut is 0.
    sigma: int, optional
        Specifies the standard desviation of a distribuition.
        Defaut is 1.
    data: int, optional
        If data > 0, a randon data will be inserted insted analitcs data.
        Defaut is 0.
    outlier: int, optional
        Is the point of an outlier event, e.g outlier = 50 will put an event in -50 and +50 if mu = 0.
        Defaut is 0
    distribuition: str, optional
        Select the distribuition to analyze.
        ('normal', 'lognormal')
        Defaut is 'normal'
    points: str, optional
        Show the estimation points along the follow plots ('PDF' or 'CDF')
        Defaut is False.
    grid: bool, optional
        If True, a grid of discretization will be show in the plot.
        Defaut is False.
    """

    import numpy as np
    import scipy.stats as sp
    import matplotlib.pyplot as plt
    from scipy.interpolate import interp1d

    ngrid = int(1e6)
    if not nest % 2:
        nest = nest - 1
    if distribuition == 'normal':

        outlier_inf = outlier_sup = outlier
        if not data:
            a, b = sp.norm.interval(0.9999, loc=mu, scale=sigma)
            a, b = a - outlier_inf, b + outlier_sup
            x = np.linspace(a, b, ngrid)
            y = sp.norm.pdf(x, loc=mu, scale=sigma)

            X1 = np.linspace(a, mu, ngrid)
            Y1 = sp.norm.pdf(X1, loc=mu, scale=sigma)
            interp = interp1d(Y1, X1)
            y1 = np.linspace(Y1[0], Y1[-1], nest // 2 + 1)
            x1 = interp(y1)

            X2 = np.linspace(mu, b, ngrid)
            Y2 = sp.norm.pdf(X2, loc=mu, scale=sigma)
            interp = interp1d(Y2, X2)
            y2 = np.flip(y1, 0)
            x2 = interp(y2)
        else:
            d = np.random.normal(mu, sigma, data)
            a, b = min(d) - outlier_inf, max(d) + outlier_sup
            # Name the histogram estimate x/y so the final plt.plot(x, y)
            # also works for the data branch (density replaces the removed
            # normed argument)
            y, x = np.histogram(d, bins='fd', density=True)
            x = np.mean(np.array([x[:-1], x[1:]]), 0)
            M = np.where(y == max(y))[0][0]
            m = np.where(y == min(y))[0][0]
            interpL = interp1d(y[:M + 1],
                               x[:M + 1],
                               assume_sorted=False,
                               fill_value='extrapolate')
            interpH = interp1d(y[M:],
                               x[M:],
                               assume_sorted=False,
                               fill_value='extrapolate')

            y1 = np.linspace(y[m], y[M], nest // 2 + 1)
            x1 = interpL(y1)

            y2 = np.flip(y1, 0)
            x2 = interpH(y2)

        X = np.concatenate([x1[:-1], x2])
        Y = np.concatenate([y1[:-1], y2])

    elif distribuition == 'lognormal':
        outlier_inf = 0
        outlier_sup = outlier
        if not data:
            mode = np.exp(mu - sigma**2)
            a, b = sp.lognorm.interval(0.9999, sigma, loc=0, scale=np.exp(mu))
            a, b = a - outlier_inf, b + outlier_sup
            x = np.linspace(a, b, ngrid)
            y = sp.lognorm.pdf(x, sigma, loc=0, scale=np.exp(mu))

            X1 = np.linspace(a, mode, ngrid)
            Y1 = sp.lognorm.pdf(X1, sigma, loc=0, scale=np.exp(mu))
            interp = interp1d(Y1, X1)
            y1 = np.linspace(Y1[0], Y1[-1], nest // 2 + 1)
            x1 = interp(y1)

            X2 = np.linspace(mode, b, ngrid)
            Y2 = sp.lognorm.pdf(X2, sigma, loc=0, scale=np.exp(mu))
            interp = interp1d(Y2, X2)
            y2 = np.flip(y1, 0)
            x2 = interp(y2)
        else:
            d = np.random.lognormal(mu, sigma, data)
            a, b = min(d) - outlier_inf, max(d) + outlier_sup
            y, x = np.histogram(d, bins='fd', density=True)
            x = np.mean(np.array([x[:-1], x[1:]]), 0)
            M = np.where(y == max(y))[0][0]
            m = np.where(y == min(y))[0][0]
            interpL = interp1d(y[:M + 1],
                               x[:M + 1],
                               assume_sorted=False,
                               fill_value='extrapolate')
            interpH = interp1d(y[M:],
                               x[M:],
                               assume_sorted=False,
                               fill_value='extrapolate')

            y1 = np.linspace(y[m], y[M], nest // 2 + 1)
            x1 = interpL(y1)

            y2 = np.flip(y1, 0)
            x2 = interpH(y2)

        X = np.concatenate([x1[:-1], x2])
        Y = np.concatenate([y1[:-1], y2])

    #plt.figure(figsize=(12,8),dpi=200)
    plt.plot(x, y, label='PDF')
    plt.ylabel('Probability', fontsize=30)
    plt.xlabel('x', fontsize=30)
    #plt.title('$\mu$ = %.1f, $\sigma$ = %.1f - PDFm' %(mu,sigma))
    if points:
        plt.plot(X, Y, 'ok', label='PDF points')

    if grid:
        plt.vlines(X, 0, Y, linestyle=':')
        plt.hlines(Y, a, X, linestyle=':')
        plt.plot(X, np.zeros(nest), 'rx', ms=5, label='X points')

    plt.legend(prop={'size': 18})
    plt.tick_params(labelsize=18)
    plt.tight_layout()
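A hypothetical call of the helper above (argument values chosen only for illustration):

PDFm(15, mu=0, sigma=1, distribuition='normal', points=True, grid=True)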
    def plot_feature_importances(self, plot_n=15, threshold=None):
        """
        Plots `plot_n` most important features and the cumulative importance of features.
        If `threshold` is provided, prints the number of features needed to reach `threshold` cumulative importance.

        Parameters
        --------
        
        plot_n : int, default = 15
            Number of most important features to plot. Defaults to 15, or the
            maximum number of features, whichever is smaller.

        threshold : float, between 0 and 1, default = None
            Threshold for printing information about cumulative importances.

        """

        if self.record_zero_importance is None:
            raise NotImplementedError(
                'Feature importances have not been determined. Run `identify_zero_importance`'
            )

        # Need to adjust number of features if greater than the features in the data
        if plot_n > self.feature_importances.shape[0]:
            plot_n = self.feature_importances.shape[0] - 1

        self.reset_plot()

        # Make a horizontal bar chart of feature importances
        plt.figure(figsize=(10, 6))
        ax = plt.subplot()

        # Need to reverse the index to plot most important on top
        # There might be a more efficient method to accomplish this
        ax.barh(list(reversed(list(self.feature_importances.index[:plot_n]))),
                self.feature_importances['normalized_importance'][:plot_n],
                align='center',
                edgecolor='k')

        # Set the yticks and labels
        ax.set_yticks(
            list(reversed(list(self.feature_importances.index[:plot_n]))))
        ax.set_yticklabels(self.feature_importances['feature'][:plot_n],
                           size=12)

        # Plot labeling
        plt.xlabel('Normalized Importance', size=16)
        plt.title('Feature Importances', size=18)
        plt.show()

        # Cumulative importance plot
        plt.figure(figsize=(6, 4))
        plt.plot(list(range(1,
                            len(self.feature_importances) + 1)),
                 self.feature_importances['cumulative_importance'], 'r-')
        plt.xlabel('Number of Features', size=14)
        plt.ylabel('Cumulative Importance', size=14)
        plt.title('Cumulative Feature Importance', size=16)

        if threshold:

            # Index of minimum number of features needed for cumulative importance threshold
            # np.where returns the index so need to add 1 to have correct number
            importance_index = np.min(
                np.where(self.feature_importances['cumulative_importance'] >
                         threshold))
            plt.vlines(x=importance_index + 1,
                       ymin=0,
                       ymax=1,
                       linestyles='--',
                       colors='blue')
            plt.show()

            print('%d features required for %0.2f of cumulative importance' %
                  (importance_index + 1, threshold))
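A hypothetical call, assuming `fs` is an instance of the (unshown) class defining this method and that its zero-importance pass has already been run:

fs.plot_feature_importances(plot_n=12, threshold=0.95)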
def plot_matrix(plot_type, density_mx, residues, output_file_base):
    if plot_type == 'k1k2':
        lbound = -6
        ubound = -1
        lbound_ix = 15
        xlabel = 'log$_{10}$(k1, k2)'
    elif plot_type == 'c1c2':
        lbound = 0
        ubound = 10
        lbound_ix = 0
        xlabel = 'log$_{10}$($C_2$, $C_3$)'
    elif plot_type == 'f1f2':
        lbound = 0
        ubound = 100
        lbound_ix = 0
        xlabel = 'log$_{10}$($F_2$, $F_3$)'
    else:
        raise ValueError("Unknown plot type: %s" % plot_type)

    fig = plt.figure(figsize=(2, 3), dpi=300)
    ax = fig.gca()
    ncols = density_mx.shape[0]
    num_reps = 3
    #assert ncols == (len(residues) * num_reps) + len(residues) - 1, \
    #       "Dimensions of density_mx must match numbers of reps/residues"
    ax.imshow(density_mx[:,lbound_ix:,:], interpolation='nearest',
              extent=(lbound, ubound, ncols, 0), aspect='auto')
    # Plot line representing max observed value
    if plot_type == 'c1c2':
        plt.vlines(np.log10(max_nbd_value), ncols, 0, color='gray')
    # Lines separating the different mutants
    lines = np.arange((2*num_reps), ncols, (2*num_reps)+2) + 1
    plt.hlines(lines, lbound, ubound, linewidth=0.5)

    """
    # Plot lines representing starting values
    # FIXME FIXME FIXME
    mut_ix_bounds = [0] + list(lines) + [ncols]
    activator = 'Bid'
    row_ix = 0
    for nbd_ix, nbd_residue in enumerate(residues):
        for rep_num in range(1, num_reps + 1):
            timecourse = \
                    df[(activator, 'NBD', nbd_residue, rep_num, 'VALUE')].values
            f0 = timecourse[0]
            #res_ix_lbound = mut_ix_bounds[nbd_ix]
            #res_ix_ubound = mut_ix_bounds[nbd_ix + 1]
            plt.vlines(np.log10(f0), row_ix, row_ix + 1, color='gray')
            row_ix += 1
        row_ix += 1
    """
    # Ticks for the different mutants
    spacing = 8
    ytick_positions = np.arange(3, ncols, spacing)
    ax.set_yticks(ytick_positions)
    assert len(residues) == len(ytick_positions)
    ax.set_yticklabels(residues)
    # Ticks for the units on the x-axis
    xtick_positions = np.arange(lbound, ubound + 0.1, 1.)
    ax.set_xticks(xtick_positions)
    ax.set_xlabel(xlabel)
    if plot_type == 'c1c2':
        ax.set_ylabel('NBD Position')
    format_axis(ax)
    fig.subplots_adjust(left=0.17, bottom=0.11, top=0.94)
    fig.savefig('%s.pdf' % output_file_base, dpi=1200)
    fig.savefig('%s.png' % output_file_base, dpi=300)
#              fmt='cd', mfc='none', ms=10, mew=2, label=r'$\mathrm{CLT_{A} - CLT_{A}}$ (Event Type)')
plt.errorbar(hdd['trigRateMean'], np.asarray(hdd['tlt']) - np.asarray(hdd['cltNoEdtm']), yerr = calcSumErr(np.asarray(hdd['tltErr']), np.asarray(hdd['cltNoEdtmErr'])), 
             fmt='cd', mfc='none', ms=10, mew=2, label=r'$\mathrm{LT_{E} - CLT_{P}}$')
plt.errorbar(hdd['trigRateMean'], np.asarray(hdd['clt']) - np.asarray(hdd['cltNoEdtm']), yerr = calcSumErr(np.asarray(hdd['cltErr']), np.asarray(hdd['cltNoEdtmErr'])), 
             fmt='g*', mfc='none', ms=10, mew=2, label=r'$\mathrm{CLT_{A} - CLT_{P}}$')

# plt.plot(hmsRate, 100.*(ltClock(hmsBusyMin, hmsRate, edtmRate) - ltPoisson(hmsBusyMin, hmsRate, edtmRate)), 'g--', label='EDTM Non-Poissonian Bias (150 us Busy)')
# plt.plot(hmsRate, 100.*(ltClock(hmsBusyMax, hmsRate, edtmRate) - ltPoisson(hmsBusyMax, hmsRate, edtmRate)), 'm--', label='EDTM Non-Poissonian Bias (300 us Busy)')
# plt.plot(hmsRate, 100.*(ltClock(hmsBusyAvg, hmsRate, edtmRate) - ltPoisson(hmsBusyAvg, hmsRate, edtmRate)), 'r--', label='EDTM Non-Poissonian Bias (225 us Busy)')

# plt.plot(hmsRate, 100.*linDiff(hmsBusyMin, hmsRate, edtmRate), 'c--', label='EDTM Linear Bias (150 us Busy)')
# plt.plot(hmsRate, 100.*linDiff(hmsBusyMax, hmsRate, edtmRate), 'y--', label='EDTM Linear Bias (300 us Busy)')
# plt.plot(hmsRate, 100.*linDiff(hmsBusyAvg, hmsRate, edtmRate), 'g--', label='EDTM Linear Bias (225 us Busy)')

plt.hlines(0, 0, 7500, colors='y', linestyles='-.', label = 'Residual = 0%')
plt.vlines(100, -0.5, 3.5, colors='g', linestyles='-.', label='EDTM Rate (100 Hz)')

hlfParams, hlfCov =  optimization.curve_fit(linFit, hdd['trigRateMean'][2:7], np.asarray(hdd['tlt'][2:7]) - np.asarray(hdd['cltNoEdtm'][2:7]), sigma = calcSumErr(np.asarray(hdd['tltErr'][2:7]), np.asarray(hdd['cltNoEdtmErr'][2:7])))
plt.plot(hmsRate, linFit(hmsRate, hlfParams[0], hlfParams[1]), 'k-.', label = 'Linear Fit')

hqfParams, hqfCov =  optimization.curve_fit(quadFit, hdd['trigRateMean'], np.asarray(hdd['clt']) - np.asarray(hdd['cltNoEdtm']), sigma = calcSumErr(np.asarray(hdd['cltErr']), np.asarray(hdd['cltNoEdtmErr'])))
plt.plot(hmsRate, quadFit(hmsRate, hqfParams[0], hqfParams[1]), 'm-.', label = 'Second Order Fit')

plt.xlim(0, 7500)
plt.ylim(-0.5, 3.5)
# plt.ylim(-0.5, 0.5)
plt.xlabel('Trigger Rate (Hz)')
plt.ylabel('Residuals (%)')
plt.legend(loc='best', fancybox=True, numpoints=1)
plt.savefig('hmsLiveTime.pdf')
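The indented block below is the tail of a toy Monte Carlo routine whose header was lost; a plausible reconstruction (names and signature inferred from the call further down, so they are assumptions) is:

import numpy as np
import matplotlib.pyplot as plt

def ToyMonteCarlo(N, sigMin, sigMax, sigStep):
    ConfidenceLevel = []
    CrossSection = []
    for i in np.arange(sigMin, sigMax, sigStep):  # candidate cross sections
        Count = 0
        for j in range(int(N)):                   # N pseudo-experiments per point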
            # In each pseudo-experiment a random number of background and
            # signal events is generated. Both the background and the signal
            # events are modeled by a Poisson distribution. The expectation of
            # the Poisson models is given in the problem sheet.
            BackGroundExpectation = np.random.normal(5.7, 0.4)
            BackGround = np.random.poisson(BackGroundExpectation)
            lumError = 12 / 100
            luminosity = np.random.normal(12, lumError)
            SignalExpectation = i * luminosity
            Signal = np.random.poisson(SignalExpectation)
            TotalEvents = Signal + BackGround

            if TotalEvents > 5:
                Count += 1

        ConfidenceLevel += [Count / N]
        CrossSection += [i]

    return CrossSection, ConfidenceLevel


sigma, conflevel = ToyMonteCarlo(1e3, 0, 1, 0.01)
plt.plot(sigma, conflevel, '.')
plt.hlines(0.95, 0, 1)
plt.vlines(0.41, 0.5, 1)
plt.title('Confidence level against cross section')
plt.xlabel('Cross section (nb)')
plt.ylabel('Confidence level')
plt.show()
plt.clf()
def linspace(nest,
             mu=0,
             sigma=1,
             outlier=0,
             distribuition='normal',
             data=0,
             points=False,
             grid=False):
    """
    Returns a generic plot of a selected distribuition based on linspace discretization.

    Parameters
    ----------
    nest: int
        The number of estimation points.
    mu: int, optional
        Specifies the mean of distribuition.
        Defaut is 0.
    sigma: int, optional
        Specifies the standard desviation of a distribuition.
        Defaut is 1.
    outlier: int, optional
        Is the point of an outlier event, e.g outlier = 50 will put an event in -50 and +50 if mu = 0.
        Defaut is 0
    distribuition: str, optional
        Select the distribuition to analyze.
        ('normal', 'lognormal')
        Defaut is 'normal'
    data: int, optional
        If data > 0, a randon data will be inserted insted analitcs data.
        Defaut is 0.
    points: bool, optional
        If True, it will plot with the discratization points on its PDF.
        Defaut is False.
    grid: bool, optional
        If True, a grid of discatization will be show in the plot.
        Defaut is False.
    """
    import numpy as np
    import scipy.stats as sp
    import matplotlib.pyplot as plt

    ngrid = int(100e3)

    if distribuition == 'normal':
        outlier_inf = outlier_sup = outlier
        if not data:
            a, b = sp.norm.interval(0.9999, loc=mu, scale=sigma)
        else:
            d = np.random.normal(loc=mu, scale=sigma, size=data)
            a, b = min(d), max(d)

        xgrid = np.linspace(a - outlier_inf, b + outlier_sup, ngrid)
        xest = np.linspace(min(xgrid), max(xgrid), nest)
        ygrid = sp.norm.pdf(xgrid, loc=mu, scale=sigma)
        yest = sp.norm.pdf(xest, loc=mu, scale=sigma)

    elif distribuition == 'lognormal':
        outlier_inf = 0
        outlier_sup = outlier
        if not data:
            a, b = sp.lognorm.interval(0.9999, sigma, loc=0, scale=np.exp(mu))
        else:
            d = np.random.lognormal(mean=mu, sigma=sigma, size=data)
            a, b = min(d), max(d)

        xgrid = np.linspace(a - outlier_inf, b + outlier_sup, ngrid)
        xest = np.linspace(min(xgrid), max(xgrid), nest)
        ygrid = sp.lognorm.pdf(xgrid, sigma, loc=0, scale=np.exp(mu))
        yest = sp.lognorm.pdf(xest, sigma, loc=0, scale=np.exp(mu))

    plt.figure(figsize=(12, 8))
    plt.plot(xgrid, ygrid, label='PDF')

    plt.xlabel('X', fontsize=30)
    plt.ylabel('Probability', fontsize=30)

    if points:
        plt.plot(xest, yest, 'ok', label='Linspace Points')
    if grid:
        plt.vlines(xest, 0, yest, linestyle=':')
        plt.hlines(yest, a - outlier_inf, xest, linestyle=':')
        plt.plot(np.zeros(nest) + a - outlier_inf,
                 yest,
                 'rx',
                 ms=5,
                 label='Y points')
    plt.legend(prop={'size': 18})
    plt.xticks(size=18)
    plt.yticks(size=18)
    #plt.title('$\mu$ = %.1f, $\sigma$ = %.1f - Linspace' %(mu,sigma))
    plt.tight_layout()
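A hypothetical call of this helper, mirroring the PDFm example above:

linspace(20, mu=0, sigma=0.5, distribuition='lognormal', points=True, grid=True)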