def plot_example_psds(example,rate):
    """
    This function creates a figure with four lines showing the overall PSD
    for the four sleep examples. (Recall row 0 is REM; rows 1-3 are NREM
    stages 1, 2, and 3/4.)
        
    """

    sleep_stages = ['REM sleep', 'Stage 1 NREM sleep', 'Stage 2 NREM sleep', 'Stage 3 and 4 NREM sleep'];    
    
    plt.figure()
    
    ##YOUR CODE HERE    
    for i in range( len( example[:,0]) ):    
        
        # Estimate the power spectral density (FFT-based, via matplotlib.mlab.psd)
        psd, frequency = m.psd(example[i, :], NFFT = 512, Fs = rate )
        
        # normalize the PSD so it sums to 1
        psd = psd / np.sum(psd)
        
        # plot sleep stages
        plt.plot(frequency, psd, label = sleep_stages[i])
        
        # axis labels
        plt.ylabel('Normalized Power Spectral Density')
        plt.xlabel('Frequency (Hz)')
    
    plt.xlim(0,20)
    plt.legend(loc=0)
    plt.title('Overall Power Spectral Density for Sleep Stages')
    
    return
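
A minimal usage sketch (not from the original source), assuming `example` is a 4 x N array of EEG samples, `rate` is the sampling rate in Hz, and matplotlib.mlab is imported as `m` (as the function above expects):

import numpy as np
import matplotlib.pylab as plt
import matplotlib.mlab as m

rate = 128                                  # assumed sampling rate [Hz]
t = np.arange(0, 30, 1.0 / rate)            # 30 s of synthetic data per stage
example = np.vstack([np.sin(2 * np.pi * f * t) + np.random.randn(t.size)
                     for f in (6, 10, 12, 2)])   # one dominant frequency per row
plot_example_psds(example, rate)
plt.show()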
Example #2
def make_corr1d_fig(dosave=False):
    corr = make_corr_both_hemi()
    lw=2; fs=16
    pl.figure(1)#, figsize=(8, 7))
    pl.clf()
    pl.xlim(4,300)
    pl.ylim(-400,+500)    
    lambda_titles = [r'$20 < \lambda < 30$',
                     r'$30 < \lambda < 40$',
                     r'$\lambda > 40$']
    colors = ['blue','green','red']
    for i in range(3):
        corr1d, rcen = corr_1d_from_2d(corr[i])
        # ipdb.set_trace()  # debug breakpoint, disabled
        pl.semilogx(rcen, corr1d*rcen**2, lw=lw, color=colors[i])
        #pl.semilogx(rcen, corr1d*rcen**2, 'o', lw=lw, color=colors[i])
    pl.xlabel(r'$s (Mpc)$',fontsize=fs)
    pl.ylabel(r'$s^2 \xi_0(s)$', fontsize=fs)    
    pl.legend(lambda_titles, loc='lower left', fontsize=fs+3)
    pl.plot([.1,10000],[0,0],'k--')
    s_bao = 149.28
    pl.plot([s_bao, s_bao],[-9e9,+9e9],'k--')
    pl.text(s_bao*1.03, 420, 'BAO scale')
    pl.text(s_bao*1.03, 370, '%0.1f Mpc'%s_bao)
    if dosave: pl.savefig('xi1d_3bin.pdf')
Example #3
def plot_svc(X, y, mysvc, bounds=None, grid=50):
    if bounds is None:
        xmin = np.min(X[:, 0], 0)
        xmax = np.max(X[:, 0], 0)
        ymin = np.min(X[:, 1], 0)
        ymax = np.max(X[:, 1], 0)
    else:
        xmin, ymin = bounds[0], bounds[0]
        xmax, ymax = bounds[1], bounds[1]
    aspect_ratio = (xmax - xmin) / (ymax - ymin)
    xgrid, ygrid = np.meshgrid(np.linspace(xmin, xmax, grid),
                              np.linspace(ymin, ymax, grid))
    plt.gca(aspect=aspect_ratio)
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.xticks([])
    plt.yticks([])
    plt.hold(True)
    plt.plot(X[y == 1, 0], X[y == 1, 1], 'bo')
    plt.plot(X[y == -1, 0], X[y == -1, 1], 'ro')
    
    box_xy = np.append(xgrid.reshape(xgrid.size, 1), ygrid.reshape(ygrid.size, 1), 1)
    if mysvc is not None:
        scores = mysvc.decision_function(box_xy)
    else:
        print 'You must have a valid SVC object.'
        return None;
    
    CS=plt.contourf(xgrid, ygrid, scores.reshape(xgrid.shape), alpha=0.5, cmap='jet_r')
    plt.contour(xgrid, ygrid, scores.reshape(xgrid.shape), levels=[0], colors='k', linestyles='solid', linewidths=1.5)
    plt.contour(xgrid, ygrid, scores.reshape(xgrid.shape), levels=[-1,1], colors='k', linestyles='dashed', linewidths=1)
    plt.plot(mysvc.support_vectors_[:,0], mysvc.support_vectors_[:,1], 'ko', markerfacecolor='none', markersize=10)
    CB = plt.colorbar(CS)
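
A hedged usage sketch for plot_svc (assumed data, not from the original source), using scikit-learn's SVC, which provides the decision_function and support_vectors_ attributes the plot relies on, with labels coded as +1/-1:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC

# two Gaussian blobs labelled +1 / -1
rng = np.random.RandomState(0)
X = np.vstack([rng.randn(40, 2) + [2, 2], rng.randn(40, 2) - [2, 2]])
y = np.hstack([np.ones(40), -np.ones(40)])

clf = SVC(kernel='rbf', C=1.0).fit(X, y)
plot_svc(X, y, clf, grid=100)
plt.show()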
def plot_example_spectrograms(example,rate):
    """
    This function creates a figure with spectrogram subplots, one for each of
    the four sleep examples. (Recall row 0 is REM; rows 1-3 are NREM stages 1,
    2, and 3/4.)
    """
    sleep_stages = ['REM sleep', 'Stage 1 NREM sleep', 'Stage 2 NREM sleep', 'Stage 3 and 4 NREM sleep'];    
    
    plt.figure()
    
    ###YOUR CODE HERE
    for i in range( len(example[:,0]) ):
       
       # plot every sleep stage in a separate plot
       plt.subplot(2,2,i+1)
       
       # plot spectrogram
       plt.specgram(example[i, :],NFFT=512,Fs=rate)   
       
       # axis labels and title
       plt.xlabel('Time (Seconds)')
       plt.ylabel('Frequency (Hz)')
       plt.title( 'Spectrogram ' + sleep_stages[i] )
       
       plt.ylim(0,60)
       plt.xlim(0,290)
    return
Example #5
    def test1():
        x = [0.5]*3
        xbounds = [(-5, 5) for y in x]


        GA = GenAlg(fitcalc1, x, xbounds, popMult=100, bitsPerGene=9, mutation=(1./9.), crossover=0.65, crossN=2, direction='min', maxGens=60, hammingDist=False)
        results = GA.run()
        print "*** DONE ***"
        #print results
        plt.ioff()
        #generate pareto frontier numerically
        x1_ = np.arange(-5., 0., 0.05)
        x2_ = np.arange(-5., 0., 0.05)
        x3_ = np.arange(-5., 0., 0.05)

        pfn = []
        for x1 in x1_:
            for x2 in x2_:
                for x3 in x3_:
                    pfn.append(fitcalc1([x1,x2,x3]))

        pfn.sort(key=lambda x:x[0])
        
        plt.figure()
        i = 0
        for x in results:
            plt.scatter(x[1][0], x[1][1], 20, c='r')

        plt.scatter([x[0] for x in pfn], [x[1] for x in pfn], 1.0, c='b', alpha=0.1)
        plt.xlim([-20,-1])
        plt.ylim([-12, 2])
        plt.draw()
Example #6
    def visualization2(self, sp_to_vis=None):
        if sp_to_vis:
            species_ready = list(set(sp_to_vis).intersection(self.all_sp_signatures.keys()))
        else:
            raise Exception('list of driver species must be defined')

        if not species_ready:
            raise Exception('None of the input species is a driver')

        for sp in species_ready:
            # Setting up figure
            plt.figure()
            plt.subplot(313)

            mon_val = OrderedDict()
            signature = self.all_sp_signatures[sp]
            for idx, mon in enumerate(list(set(signature))):
                if mon[0] == 'C':
                    mon_val[self.all_comb[sp][mon] + (-1,)] = idx
                else:
                    mon_val[self.all_comb[sp][mon]] = idx

            mon_rep = [0] * len(signature)
            for i, m in enumerate(signature):
                if m[0] == 'C':
                    mon_rep[i] = mon_val[self.all_comb[sp][m] + (-1,)]
                else:
                    mon_rep[i] = mon_val[self.all_comb[sp][m]]
            # mon_rep = [mon_val[self.all_comb[sp][m]] for m in signature]

            y_pos = numpy.arange(len(mon_val.keys()))
            plt.scatter(self.tspan[1:], mon_rep)
            plt.yticks(y_pos, mon_val.keys())
            plt.ylabel('Monomials', fontsize=16)
            plt.xlabel('Time(s)', fontsize=16)
            plt.xlim(0, self.tspan[-1])
            plt.ylim(0, max(y_pos))

            plt.subplot(312)

            for name in self.model.odes[sp].as_coefficients_dict():
                mon = name
                mon = mon.subs(self.param_values)
                var_to_study = [atom for atom in mon.atoms(sympy.Symbol)]
                arg_f1 = [numpy.maximum(self.mach_eps, self.y[str(va)][1:]) for va in var_to_study]
                f1 = sympy.lambdify(var_to_study, mon)
                mon_values = f1(*arg_f1)
                mon_name = str(name).partition('__')[2]
                plt.plot(self.tspan[1:], mon_values, label=mon_name)
            plt.ylabel('Rate(m/sec)', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.1, 0.85), loc='upper right', ncol=1)

            plt.subplot(311)
            plt.plot(self.tspan[1:], self.y['__s%d' % sp][1:], label=parse_name(self.model.species[sp]))
            plt.ylabel('Molecules', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.15, 0.85), loc='upper right', ncol=1)
            plt.suptitle('Tropicalization' + ' ' + str(self.model.species[sp]))

            # plt.show()
            plt.savefig('s%d' % sp + '.png', bbox_inches='tight', dpi=400)
    def plot(self, bit_stream):
        if self.previous_bit_stream != bit_stream.to_list():
            self.previous_bit_stream = bit_stream

            x = []
            y = []
            bit = None

            for bit_time in bit_stream.to_list():
                if bit is None:
                    x.append(bit_time)
                    y.append(0)
                    bit = 0
                elif bit == 0:
                    x.extend([bit_time, bit_time])
                    y.extend([0, 1])
                    bit = 1
                elif bit == 1:
                    x.extend([bit_time, bit_time])
                    y.extend([1, 0])
                    bit = 0

            plt.clf()
            plt.plot(x, y)
            plt.xlim([0, 10000])
            plt.ylim([-0.1, 1.1])
            plt.show()
            plt.pause(0.005)
def plot_q(frame,file_prefix='claw',file_format='petsc',path='./_output/',plot_pcolor=True,plot_slices=True,slices_xlimits=None):
    import sys
    sys.path.append('.')
    import gaussian_1d

    sol=Solution(frame,file_format=file_format,read_aux=False,path=path,file_prefix=file_prefix)
    x=sol.state.grid.x.centers
    mx=len(x)

    bathymetry = 0.5
    eta=sol.state.q[0,:] + bathymetry

    if frame < 10:
        str_frame = "00"+str(frame)
    elif frame < 100:
        str_frame = "0"+str(frame)
    else:
        str_frame = str(frame)

    fig = pl.figure(figsize=(40,10))
    ax = fig.add_subplot(111)
    ax.set_aspect(aspect=1)
    ax.plot(x,eta)
    #pl.title("t= "+str(sol.state.t),fontsize=20)
    #pl.xticks(size=20); pl.yticks(size=20)
    #pl.xlim([0, gaussian_1d.Lx])
    pl.ylim([0.5, 1.0])
    pl.xlim([0., 4.0])
    #pl.axis('equal')
    pl.savefig('./_plots/eta_'+str_frame+'_slices.png')
    pl.close()
Example #9
def plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):
    """
    Plot a radially symmetric Q model.

    plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):

    r_min=minimum radius [km], r_max=maximum radius [km], dr=radius
    increment [km]

    Currently available models (model): cem, prem, ql6
    """
    import matplotlib.pylab as plt

    r = np.arange(r_min, r_max + dr, dr)
    q = np.zeros(len(r))

    for k in range(len(r)):

        if model == 'cem':
            q[k] = q_cem(r[k])
        elif model == 'ql6':
            q[k] = q_ql6(r[k])
        elif model == 'prem':
            q[k] = q_prem(r[k])

    plt.plot(r, q, 'k')
    plt.xlim((0.0, r_max))
    plt.xlabel('radius [km]')
    plt.ylabel('Q')
    plt.show()
def plotFirstTacROC(dataset):
    import matplotlib.pylab as plt
    from os.path import join
    from src.utils import PROJECT_DIR

    plt.figure(figsize=(6, 6))
    time_sampler = TimeSerieSampler(n_time_points=12)
    evaluator = Evaluator()
    time_series_idx = 0
    methods = {
        "cross_correlation": "Cross corr.   ",
        "kendall": "Kendall        ",
        "symbol_mutual": "Symbol MI    ",
        "symbol_similarity": "Symbol sim.",
    }
    for method in methods:
        print method
        predictor = SingleSeriesPredictor(good_methods[method], time_sampler)
        prediction = predictor.predictAllInstancesCombined(dataset, time_series_idx)
        roc_auc, fpr, tpr = evaluator.evaluate(prediction)
        plt.plot(fpr, tpr, label=methods[method] + " (auc = %0.3f)" % roc_auc)
    plt.legend(loc="lower right")
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.grid()
    plt.savefig(join(PROJECT_DIR, "output", "firstTACROC.pdf"))
def fit_plot_unlabeled_data(unlabeled_data_x, labeled_data_x, labeled_data_y, fit_order, data_type, other_data_list, other_data_name):
    output = open('predictions.csv','wb')
    coeffs = np.polyfit(labeled_data_x, labeled_data_y, fit_order) #does poly fit to nth degree on labeled data
    fit_eq = np.poly1d(coeffs) #Eqn from fit
    predicted_y = fit_eq(unlabeled_data_x)
    i = 0
    writer = csv.writer(output,delimiter=',')
    header = [str(data_type),str(other_data_name),'Predicted_Num_Inc']
    writer.writerow(header)
    while i < len(predicted_y):
        output_data = [unlabeled_data_x[i],other_data_list[i],predicted_y[i]]
        writer.writerow(output_data)
        print 'For '+str(data_type)+' of: '+str(unlabeled_data_x[i])+', Predicted Number of Incidents is: '+str(predicted_y[i])
        i = i + 1
    plt.scatter(unlabeled_data_x, predicted_y, color='blue', label='Predicted Number of Incidents')
    fit_line_x = np.arange(min(unlabeled_data_x), max(unlabeled_data_x), 1)
    plt.plot(fit_line_x, fit_eq(fit_line_x), color='red',linestyle='dashed',label=' Order '+str(fit_order)+' Polynomial Fit')
#____Use below line to plot actual data also!! 
    #plt.scatter(labeled_data_x, labeled_data_y, color='green', label='Actual Incident Report Data')
    plt.title('Predicted Number of 311 Incidents by '+str(data_type))
    plt.xlabel(str(data_type))
    plt.ylabel('Number of 311 Incidents')
    plt.grid()
    plt.xlim([min(unlabeled_data_x)-1500, max(unlabeled_data_x)+1500])
    plt.legend(loc='upper left')
    plt.show()
def plot_prob_effector(sens, fpr, xmax=1, baserate=0.1):
    """Plots a line graph of P(effector|positive test) against
    the baserate of effectors in the input set to the classifier.
        
    The baserate argument draws an annotation arrow
    indicating P(pos|+ve) at that baserate
    """
    assert 0.1 <= xmax <= 1, "Max x axis value must be in range [0.1,1]"
    assert 0.01 <= baserate <= 1, "Baserate annotation must be in range [0.01,1]"
    baserates = pylab.arange(0, 1.05, xmax * 0.005)  
    probs = [p_correct_given_pos(sens, fpr, b) for b in baserates]
    pylab.plot(baserates, probs, 'r')
    pylab.title("P(eff|pos) vs baserate; sens: %.2f, fpr: %.2f" % (sens, fpr))
    pylab.ylabel("P(effector|positive)")
    pylab.xlabel("effector baserate")
    pylab.xlim(0, xmax)
    pylab.ylim(0, 1)
    # Add annotation arrow
    xpos, ypos = (baserate, p_correct_given_pos(sens, fpr, baserate))
    if baserate < xmax:
        if xpos > 0.7 * xmax:
            xtextpos = 0.05 * xmax
        else:
            xtextpos = xpos + (xmax-xpos)/5.
        if ypos > 0.5:
            ytextpos = ypos - 0.05
        else:
            ytextpos = ypos + 0.05
        pylab.annotate('baserate: %.2f, P(pos|+ve): %.3f' % (xpos, ypos), 
                       xy=(xpos, ypos), 
                       xytext=(xtextpos, ytextpos),
                       arrowprops=dict(facecolor='black', shrink=0.05))
    else:
        pylab.text(0.05 * xmax, 0.95, 'baserate: %.2f, P(pos|+ve): %.3f' %
                   (xpos, ypos))
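
The helper p_correct_given_pos is not shown in this excerpt; under the usual Bayes'-rule reading of sensitivity and false-positive rate, a minimal sketch could be:

def p_correct_given_pos(sens, fpr, b):
    # P(effector | positive test) at baserate b, by Bayes' rule.
    return (sens * b) / (sens * b + fpr * (1.0 - b))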
def draw_lineplot(x, y, title="title", xlab="x", ylab="y", odir="", xlim=None, ylim=None, outfmt='eps'):

  if len(x) == 0 or len(y) == 0:
    return;
  #fi

  plt.cla();
  plt.plot(x, y, marker='x');
  plt.xlabel(xlab);
  plt.ylabel(ylab);
  plt.title(title);

  if xlim == None:
    xmin = min(x);
    xmax = max(x);
    xlim = [xmin, xmax];
  #fi

  if ylim == None:
    ymin = min(y);
    ymax = max(y);
    ylim = [ymin, ymax];
  #fi

  plt.xlim(xlim);
  plt.ylim(ylim);

  plt.savefig('%s%s.%s' % (odir + ('/' if odir else ""), '_'.join(title.split(None)), outfmt), format=outfmt);

  return '%s.%s' % ('_'.join(title.split(None)), outfmt), title;
Example #14
def plot_behavior_count(agent_id, behavior_count):
    """Create a plot for the behavior count.

    Args:
        agent_id: The identifier of the agent.
        behavior_count: The count of the agent behaviors.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.xlabel(u'Number of games')
    plt.ylabel(u'Probability of selecting a behavior')
    plt.title(u'Probability of agent %d selecting a behavior'
              % agent_id)

    plt.xlim([0, 115])
    plt.ylim([-0.1, 1.1])

    data = np.array([b for b in behavior_count.values()])
    prob = data/np.sum(data, axis=0)

    for i, behavior in enumerate(behavior_count):
        coeff = calculate_regression_coefficients(prob[i], degree=4)
        regression = [calculate_regression_y(x, coeff)
                      for x in range(len(prob[i]))]
        ax.plot(regression, label=T[behavior],
                c=COLOR_TABLE[COLOR_LIST[i]], linewidth=2.0)
        ax.scatter(range(len(prob[i])), prob[i], c=COLOR_TABLE[COLOR_LIST[i]],
                   alpha=1)

    ax.legend()
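
calculate_regression_coefficients and calculate_regression_y are not included in this excerpt; a minimal numpy-based sketch consistent with how they are called (fit a polynomial of the given degree to the probability series, then evaluate it) might be:

import numpy as np

def calculate_regression_coefficients(values, degree=4):
    # Fit a polynomial of `degree` to the values against their index.
    return np.polyfit(np.arange(len(values)), values, degree)

def calculate_regression_y(x, coeff):
    # Evaluate the fitted polynomial at x.
    return np.polyval(coeff, x)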
Example #15
def plot_average(filenames, save_plot=True, show_plot=False, dpi=100):

    ''' Plot Signal average from a list of averaged files. '''

    fname = get_files_from_list(filenames)

    # plot averages
    pl.ioff()  # switch off (interactive) plot visualisation
    factor = 1e15
    for fnavg in fname:
        name = fnavg[0:len(fnavg) - 4]
        basename = os.path.splitext(os.path.basename(name))[0]
        print fnavg
        # mne.read_evokeds provides a list or a single evoked based on condition.
        # here we assume only one evoked is returned (requires further handling)
        avg = mne.read_evokeds(fnavg)[0]
        ymin, ymax = avg.data.min(), avg.data.max()
        ymin *= factor * 1.1
        ymax *= factor * 1.1
        fig = pl.figure(basename, figsize=(10, 8), dpi=100)
        pl.clf()
        pl.ylim([ymin, ymax])
        pl.xlim([avg.times.min(), avg.times.max()])
        pl.plot(avg.times, avg.data.T * factor, color='black')
        pl.title(basename)

        # save figure
        fnfig = os.path.splitext(fnavg)[0] + '.png'
        pl.savefig(fnfig, dpi=dpi)

    pl.ion()  # switch on (interactive) plot visualisation
Example #16
    def plot_df(self,show=False):
        from matplotlib import pylab as plt 

        if self.afp is None:
            print 'afp not initialized. call update afp'
            return -1

        linecords,td,df,rtn,minmaxy = self.afp

        formatter = PlotDateFormatter(df.index)
        #fig = plt.figure()
        #ax = plt.addsubplot()
        fig, ax = plt.subplots()
        ax.xaxis.set_major_formatter(formatter)
    
        ax.plot(np.arange(len(df)), df['p'])

        for cord in linecords:
            plt.plot(cord[0],cord[1],color='red')

        fig.autofmt_xdate()
        plt.xlim(-10,len(df.index) + 10)
        plt.ylim(df.p.min() - 10,df.p.max() + 10)
        plt.grid(ax)
        #if show:
        #    plt.show()
        
        #"{0}{1}.png".format("./data/",datetime.datetime.strftime(datetime.datetime.now(),'%Y%M%m%S'))
        if self.plot_file:
            save_path = self.plot_file.format(self.symbol)
            if os.path.exists(os.path.dirname(save_path)):
                plt.savefig(save_path)
                
        plt.clf()
        plt.close()
Example #17
def nova_plot():

	erg2mev=624151.

	fig=plot.figure()
	yrange = [1e-6,2e-4]
	xrange = [1e-1,1e5]
	plot.fill_between([0.2,10e3],[yrange[1],yrange[1]],[yrange[0],yrange[0]],facecolor='yellow',interpolate=True,color='yellow',alpha=0.5)
	plot.annotate('AMEGO',xy=(3,9e-5),xycoords='data',fontsize=26,color='black')

	lat=ascii.read("data/NMon2012.LAT.dat",names=['energy','en_low','en_high','flux','flux_err','tmp'])
	plot.scatter(lat['energy'],lat['flux']*erg2mev,color='red')
	plot.errorbar(lat['energy'],lat['flux']*erg2mev,xerr=[lat['en_low'],lat['en_high']],yerr=lat['flux_err']*erg2mev,ecolor='red',capsize=0,fmt='none')
	latul=ascii.read("data/NMon2012.LAT.limits.dat",names=['energy','en_low','en_high','flux','tmp1','tmp2','tmp3','tmp4'])
	plot.errorbar(latul['energy'],latul['flux']*erg2mev,xerr=[latul['en_low'],latul['en_high']],yerr=0.5*latul['flux']*erg2mev,uplims=True,ecolor='red',capsize=0,fmt='none')
	plot.scatter(latul['energy'],latul['flux']*erg2mev,color='red')

	leptonic=ascii.read("data/sp-NMon12-IC-best-fit-1MeV-30GeV.txt",names=['energy','flux'],data_start=1)
	hadronic=ascii.read("data/sp-NMon12-pi0-and-secondaries.txt",names=['energy','flux1','flux2'],data_start=1)	

	plot.plot(leptonic['energy'],leptonic['flux']*erg2mev,'r--',color='black',lw=2,label='Leptonic')
	plot.plot(hadronic['energy'],hadronic['flux2']*erg2mev,color='black',lw=2,label='Hadronic+Secondary Leptons')

	plot.legend(loc='upper right',fontsize='small',frameon=False,framealpha=0.5)
	plot.xscale('log')
	plot.yscale('log')
	plot.ylim(yrange)
	plot.xlim(xrange)
	plot.xlabel(r'Energy (MeV)')
	plot.ylabel(r'Energy$^2 \times $ Flux (Energy) (erg cm$^{-2}$ s$^{-1}$)')
	plot.title('Nova V339 Del 2013')
	plot.savefig('Nova_SED.png', bbox_inches='tight')
	plot.savefig('Nova_SED.eps', bbox_inches='tight')
	plot.show()
	plot.close()
def chooseDegree(npts, mindegree=0, maxdegree=20, filename=None):
    """Gets noisy data, uses cross validation to estimate error, and fits new data with best model."""
    x, y = bv.noisyData(npts)
    degrees = numpy.arange(mindegree,maxdegree+1)
    errs = numpy.zeros_like(degrees,dtype=numpy.float)
    for i,d in enumerate(degrees):
        errs[i] = estimateError(x, y, d)

    plt.subplot(1,2,1)
    plt.plot(degrees,errs,'bo-')
    plt.xlabel("Degree")
    plt.ylabel("CV Error")

    besti = numpy.argmin(errs)
    bestdegree = degrees[besti]

    plt.subplot(1,2,2)
    x2, y2 = bv.noisyData(npts)
    plt.plot(x2,y2,'ro')
    xs = numpy.linspace(min(x),max(x),150)
    fitf = numpy.poly1d(numpy.polyfit(x2,y2,bestdegree))
    plt.plot(xs,fitf(xs),'g-',lw=2)
    plt.xlim((bv.MIN,bv.MAX))
    plt.ylim((-2.,2.))
    plt.suptitle('Selected Degree '+str(bestdegree))
    bv.outputPlot(filename)
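
estimateError is referenced above but not defined in this excerpt; a simple k-fold cross-validation sketch for polynomial fits (an assumption about its behaviour, expecting x and y as numpy arrays) could be:

import numpy

def estimateError(x, y, degree, nfolds=5):
    # Mean squared validation error of a degree-`degree` polynomial fit,
    # estimated by k-fold cross validation.
    idx = numpy.arange(len(x))
    numpy.random.shuffle(idx)
    folds = numpy.array_split(idx, nfolds)
    err = 0.0
    for k in range(nfolds):
        test = folds[k]
        train = numpy.hstack([folds[j] for j in range(nfolds) if j != k])
        fit = numpy.poly1d(numpy.polyfit(x[train], y[train], degree))
        err += numpy.mean((fit(x[test]) - y[test]) ** 2)
    return err / nfolds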
Example #19
def _gaussian_test():
    import matplotlib.pyplot as plt
    n = 10000
    mu_x = 0.0
    mu_y = 0.0
    #sig_x, sig_y = 1.5, 1.5
    tau = 0.0
    seeing = 1.5
    sigma = seeing / (2. * np.sqrt(2. * np.log(2.)))  # convert FWHM (seeing) to sigma
    slit_width = 0.2
    slit_height = 10.0
    slit_x = np.empty(n, dtype=np.float64)
    slit_y = np.empty(n, dtype=np.float64)
    slit_x, slit_y = slit_gaussian_psf(n, mu_x, mu_y, sigma, sigma, tau, slit_width, slit_height)
    log.info("x range: [%s, %s]", slit_x.min(), slit_x.max())
    log.info("y range: [%s, %s]", slit_y.min(), slit_y.max())
    plt.scatter(slit_x, slit_y, alpha=0.8)
    plt.fill([-slit_width/2, slit_width/2, slit_width/2, -slit_width/2],
             [-slit_height/2, -slit_height/2, slit_height/2, slit_height/2],
             'r',
             alpha=0.10,
             edgecolor='k')
    plt.gca().set_aspect("equal")
    plt.title("Gaussian distribution")
    plt.xlim([-slit_height/2., slit_height/2])
    plt.show()
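
slit_gaussian_psf is not part of this excerpt; one plausible sketch (an assumption; it ignores the tau rotation argument) is rejection sampling of a 2-D Gaussian truncated to the slit aperture:

import numpy as np

def slit_gaussian_psf(n, mu_x, mu_y, sig_x, sig_y, tau, slit_width, slit_height):
    # Draw Gaussian samples and keep only those falling inside the slit.
    xs = np.empty(0)
    ys = np.empty(0)
    while xs.size < n:
        x = np.random.normal(mu_x, sig_x, n)
        y = np.random.normal(mu_y, sig_y, n)
        keep = (np.abs(x) <= slit_width / 2.0) & (np.abs(y) <= slit_height / 2.0)
        xs = np.append(xs, x[keep])
        ys = np.append(ys, y[keep])
    return xs[:n], ys[:n]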
Example #20
 def plot(x,y,field,filename,c=200):
     plt.figure()
     # define grid.
     xi = np.linspace(min(x),max(x),100)
     yi = np.linspace(min(y),max(y),100)
     # grid the data.
     si_lin = griddata((x, y), field, (xi[None,:], yi[:,None]), method='linear')
     si_cub = griddata((x, y), field, (xi[None,:], yi[:,None]), method='cubic')
     print np.min(field)
     print np.max(field)
     plt.subplot(211)
     # contour the gridded data, plotting dots at the randomly spaced data points.
     CS = plt.contour(xi,yi,si_lin,c,linewidths=0.5,colors='k')
     CS = plt.contourf(xi,yi,si_lin,c,cmap=plt.cm.jet)
     plt.colorbar() # draw colorbar
     # plot data points.
     #    plt.scatter(x,y,marker='o',c='b',s=5)
     plt.xlim(min(x),max(x))
     plt.ylim(min(y),max(y))
     plt.title('Linear interpolation')
     #plt.tight_layout()
     plt.subplot(212)
     # contour the gridded data, plotting dots at the randomly spaced data points.
     CS = plt.contour(xi,yi,si_cub,c,linewidths=0.5,colors='k')
     CS = plt.contourf(xi,yi,si_cub,c,cmap=plt.cm.jet)
     plt.colorbar() # draw colorbar
     # plot data points.
     #    plt.scatter(x,y,marker='o',c='b',s=5)
     plt.xlim(min(x),max(x))
     plt.ylim(min(y),max(y))
     plt.title('Cubic interpolation')
     plt.savefig(filename)
Example #21
def plot_roc(test_cat, plot_data, savefig=False):
    # Plot ROC curve
    plt.clf()
    results = []

    # calculate and sort labels by roc_auc
    for method, method_results in plot_data.items():
        fpr, tpr, roc_auc = compute_roc(test_cat, method_results, method)
        label = "[%s] area = %0.2f" % (method, roc_auc)
        res = {"label": label, "fpr": fpr, "tpr": tpr, "roc_auc": roc_auc}
        results.append(res)
    results = sorted(results, key=lambda k: k['roc_auc'], reverse=True)

    # plot according to roc_auc ranking
    for r in results:
        plt.plot(r["fpr"], r["tpr"], label=r["label"])

    plt.title('Receiver Operating Characteristic Curve (ROC)')
    plt.legend(loc="lower right")
    plt.plot([0, 1], [0, 1], 'k--')

    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])

    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')

    if savefig:
        plt.savefig("classifiers_roc.png")

    plt.show()
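
compute_roc is not included above; assuming method_results holds per-sample scores for the positive class, a scikit-learn-based sketch might be:

from sklearn.metrics import roc_curve, auc

def compute_roc(test_cat, scores, method):
    # ROC curve and area under it for one method's scores against the true labels;
    # `method` is kept only to mirror the call site above.
    fpr, tpr, _ = roc_curve(test_cat, scores)
    return fpr, tpr, auc(fpr, tpr)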
Example #22
    def plot(self,file):
        cds = CaseDataset(file, 'bson')
        data = cds.data.driver('driver').by_variable().fetch()
        cds2 = CaseDataset('../output/therm_mc_20141110173851.bson', 'bson')
        data2 = cds2.data.driver('driver').by_variable().fetch()
        
        #temp
        temp_boundary_k = data['hyperloop.temp_boundary']
        temp_boundary_k.extend(data2['hyperloop.temp_boundary'])
        temp_boundary = [((x-273.15)*1.8 + 32) for x in temp_boundary_k]
        #histogram
        n, bins, patches = plt.hist(temp_boundary, 100, normed=1, histtype='stepfilled')
        plt.setp(patches, 'facecolor', 'b', 'alpha', 0.75)

        #stats
        mean = np.average(temp_boundary)
        std = np.std(temp_boundary)
        percentile = np.percentile(temp_boundary,99.5)
        print "mean: ", mean, " std: ", std, " 99.5percentile: ", percentile
        x = np.linspace(50,170,150)
        plt.plot(x,mlab.normpdf(x,mean,std), color='black', lw=2)
        plt.xlim([60,160])
        plt.ylabel('Probability', fontsize=18)
        plt.xlabel(u'Equilibrium Temperature, \N{DEGREE SIGN}F', fontsize=18)
        #plt.show()
        plt.tight_layout()
        plt.savefig('../output/histo.pdf', dpi=300)
Example #23
0
def errbar_labels(labels,data,error,bar_opts={},ticks_opts={}):
    from matplotlib.pylab import plot,xlim
    bar_opts.update(opts(width=0,linewidth=0, fill=0))
    xlocations=barplot(labels,data,error,bar_opts,ticks_opts)
    plot(xlocations,data,"or")
    xlim(0, xlocations[-1]+0.5)
    return xlocations
Example #24
def plot_eye(Nodes,axes = None):
    """
    
    Create a movie of eye growth. To be used with EyeGrowthFunc

    :param Nodes: structure containing nodes
    :type Nodes: struct
    :param axes: existing axes to plot into; a new figure is created if None
    :type axes: matplotlib axes

    :returns: plot handle for Node plot.  Used to update during for loop.

    .. note::
       Called in EyeGrowthFunc
    """
    
    
    #set plotting parameters:
    if axes == None:
        fig = plt.figure(figsize=(10, 8))
        axes = fig.add_subplot(111,aspect='equal')
        plt.xlim([-13, 13])
        plt.ylim([-13, 13])
        
    axes.plot(np.r_[ Nodes['x'][0],Nodes['x'][0,0] ] * Nodes['radius'], 
            np.r_[ Nodes['y'][0], Nodes['y'][0,0] ] * Nodes['radius'], 
            '-ok', markerfacecolor = 'k',linewidth = 4, markersize = 10)
                
    axes = pf.TufteAxis(axes,['left','bottom'])
    #axes.set_axis_bgcolor('w')


    return axes
def plot_tuning_curves(direction_rates, title):
    """
    This function takes the direction values (degrees) and firing rates
    (spikes/s) found in the two columns of direction_rates and plots a
    histogram and polar representation of the tuning curve. It adds the
    given title.
    """
    x = direction_rates[:,0]
    y = direction_rates[:,1]
    plt.figure()
    plt.subplot(2,2,1)
    plt.bar(x,y,width=45,align='center')
    plt.xlim(-22.5,337.5)
    plt.xticks(x)
    plt.xlabel('Direction of Motion (degrees)')
    plt.ylabel('Firing Rate (spikes/s)')
    plt.title(title)   
        
        
    
    plt.subplot(2,2,2,polar=True)
    r = np.append(y,y[0])
    theta = np.deg2rad(np.append(x, x[0]))
    plt.polar(theta,r,label='Firing Rate (spikes/s)')
    plt.legend(loc=8)
    plt.title(title)
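
A small usage sketch (assumed data, not from the original source): eight motion directions with a peaked firing-rate profile.

import numpy as np
import matplotlib.pyplot as plt

directions = np.arange(0, 360, 45)                    # degrees
rates = np.array([5, 12, 30, 45, 32, 14, 6, 4])       # spikes/s
direction_rates = np.column_stack((directions, rates))
plot_tuning_curves(direction_rates, 'Example tuning curve')
plt.show()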
Example #26
def unteraufgabe_g():
    # Sampling points
    x = np.linspace(0.0,1.0,1000)
    N = np.arange(2,16)

    LU = np.ones_like(N,dtype=np.floating)
    LT = np.ones_like(N,dtype=np.floating)

    # Approximate the Lebesgue constant
    for i,n in enumerate(N):
        ################################################################
        #
        # xU = np.linspace(0.0,1.0,n)
        #
        # LU[i] = ...
        #
        # j = np.arange(n+1)
        # xT = 0.5*(np.cos((2.0*j+1.0)/(2.0*(n+1.0))*np.pi) + 1.0)
        #
        # LT[i] = ...
        #
        ################################################################
        continue

    # Plot
    plt.figure()
    plt.semilogy(N,LU,"-ob",label=r"Equidistant points")
    plt.semilogy(N,LT,"-og",label=r"Chebyshev points")
    plt.grid(True)
    plt.xlim(N.min(),N.max())
    plt.xlabel(r"$n$")
    plt.ylabel(r"$\Lambda^{(n)}$")
    plt.legend(loc="upper left")
    plt.savefig("lebesgue.eps")
Example #27
def plot_lift_data(lift_data, with_ellipses=True):
    np.random.seed(42113)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    alpha = [l['fit']['alpha'] for l in lift_data.values()]
    alpha_error = [l['fit']['alpha_error'] for l in lift_data.values()]
    beta = [l['fit']['beta'] for l in lift_data.values()]
    beta_error = [l['fit']['beta_error'] for l in lift_data.values()]
    message_class = lift_data.keys()

    num = len(beta)
    beta_jitter = np.random.randn(num)
    np.random.seed(None)
    beta = np.array(beta) + beta_jitter*0.0

    ax.plot(beta, alpha, color='red', linestyle='', marker='o', markersize=10)
    if not with_ellipses:
        ax.errorbar(beta, alpha, xerr=beta_error, yerr=alpha_error, linestyle='')
    else:
        for x, y, xerr, yerr, in zip(beta, alpha, beta_error, alpha_error):
            width = 2*xerr
            height = 2*yerr
            ellipse = patches.Ellipse((x, y), width, height,
                                      angle=0.0, linewidth=2,
                                      fill=True, alpha=0.15, color='gray')
            ax.add_patch(ellipse)

    for a, b, c in zip(alpha, beta, message_class):
        ax.annotate(c, xy=(b, a), xytext=(b+2, a+.01), fontsize=17)
    plt.xlim(0, max(beta)+30)
    plt.ylim(0, 0.9)
    plt.xlabel('Duration (days)')
    plt.ylabel('Initial Lift')
    plt.show()
def plot_prediction_accuracy(x, y):
    plt.scatter(x, y, c='g', alpha=0.5)
    plt.title('Logistic Regression')
    plt.xlabel('r')
    plt.ylabel('Prediction Accuracy')
    plt.xlim(0,200)
    plt.show()
Example #29
def plot(all_models):

    import matplotlib.pylab as plt
    import numpy.random
    plt.close("all")
    plt.figure()
    plt.subplot(211)
    alt = np.arange(0., 500., 2.)
    sza = 0.

    for m in all_models:
        d = m(alt, sza)
        plt.plot(ne_to_fp(d)/1E6, alt,lw=2)
        # plt.plot(m(alt, sza),alt,lw=2)

    plt.ylim(0., 400.)
    plt.ylabel('Altitude / km')
    # plt.xlabel(r'$n_e / cm^{-3}$')
    plt.xlabel(r'$f / MHz$')
    plt.subplot(212)
    for m in all_models:
        delay, freq = m.ais_response()
        plt.plot(freq/1E6, delay*1E3, lw=2.)

    plt.hlines(-2 * np.amax(alt) / speed_of_light_kms * 1E3, *plt.xlim(), linestyle='dashed')
    # plt.vlines(ne_to_fp(1E5)/1E6, *plt.ylim())
    # plt.hlines(  -(500-150) * 2 / speed_of_light_kms * 1E3, *plt.xlim())
    plt.ylim(-10,0)
    plt.ylabel('Delay / ms')
    plt.xlim(0, 7)
    plt.xlabel('f / MHz')
    plt.show()
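
ne_to_fp (electron density to plasma frequency) is not defined in this excerpt; a standard-physics sketch, assuming densities in cm^-3 and a result in Hz, is:

import numpy as np

def ne_to_fp(ne_cm3):
    # Electron plasma frequency, f_p ~ 8980 * sqrt(n_e) Hz for n_e in cm^-3.
    return 8980.0 * np.sqrt(ne_cm3)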
Example #30
def plot_fullstack( binning = np.linspace(0,10,1), myquery='', plotvar = default_plot_variable, \
                    scalefactor = 1., user_ylim = None):

    fig = plt.figure(figsize=(10,6))
    plt.grid(True)
    lasthist = 0
    myhistos = gen_histos(binning=binning,myquery=myquery,plotvar=plotvar,scalefactor=scalefactor)
    for key, (hist, bins) in myhistos.iteritems():

      plt.bar(bins[:-1],hist,
              width=bins[1]-bins[0],
              color=colors[key],
              bottom = lasthist,
              edgecolor = 'k',
              label='%s: %d Events'%(labels[key],sum(hist)))
      lasthist += hist
     

    plt.title('CCSingleE Stacked Backgrounds',fontsize=25)
    plt.ylabel('Events',fontsize=20)
    if plotvar == '_e_nuReco' or plotvar == '_e_nuReco_better':
        xstring = 'Reconstructed Neutrino Energy [GeV]' 
    elif plotvar == '_e_CCQE':
        xstring = 'CCQE Energy [GeV]'
    else:
        xstring = plotvar
    plt.xlabel(xstring,fontsize=20)
    plt.legend()
    plt.xticks(list(plt.xticks()[0]) + [binning[0]])
    plt.xlim([binning[0],binning[-1]])
Example #31
kva = w/va

fig=plt.figure(num=1,figsize=(3.5,3.5),dpi=300,facecolor='w',edgecolor='k')
left  = 0.15  # the left side of the subplots of the figure
right = 0.95    # the right side of the subplots of the figure
bottom = 0.23   # the bottom of the subplots of the figure
top = 0.96      # the top of the subplots of the figure
wspace = 0.2   # the amount of width reserved for blank space between subplots
hspace = 0.1   # the amount of height reserved for white space between subplots
plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)
ax = plt.subplot(1,1,1)

plt.plot(k,w/1000000,label='Whistler')
plt.plot(kva,w/1000000,label='Alfven')
plt.xlim(0,100)
plt.ylim(0,4)
plt.hlines(wci/1000000,0,100,color='green',linestyles='dotted')
plt.hlines(300000*2*np.pi/1000000,0,100,color='grey',linestyles='dashed',linewidth=0.5)
plt.xlabel(r'k [rad/m]',fontsize=10)
plt.ylabel(r'$\omega \times 10^{6}$ [Rad/Sec]',fontsize=10)
plt.text(10,3.65,r'$\omega_{ci}$',fontsize=6,color='green')
plt.text(10,2,r'$f\approx 300kHz$',fontsize=5)
plt.text(0,-1,'B='+str(B)+'G',fontsize=8)
leg=plt.legend(loc='lower right',fontsize=5,frameon=False)

plt.figure(2)
plt.semilogy(k,w/1000000,label='Whistler')
plt.semilogy(kva,w/1000000,label='Alfven')
plt.xlim(0,100)
plt.ylim(5e-1,4e0)
Example #32
n = 20

t = np.linspace(0, n - 1, n)
fv_1 = np.ones(len(t)) * pv
fv_2 = pv * (1 + r * t)  #simple interest
fv_3 = pv * (1 + r)**t

fv_1plot = plt.plot(t, fv_1, 'b-', label='Under your pillow')
fv_2plot = plt.plot(t, fv_2, 'g-', label='Simple Interest')
fv_3plot = plt.plot(t, fv_3, 'r-', label='Compound Interest')
#plt.plot(t,examp4,'m-')
plt.title(
    "One time investment of $1000 \n Simple vs. Compounded Interest Rate, 8%")
plt.xlabel("Number of Years, t")
plt.ylabel("US Dollars")
plt.xlim(0, 21)
plt.ylim(0, 5000)
plt.legend()
#plt.show()
plt.savefig('finance_int_compound.png', dpi=600)

pmt = -1000
fv = -1000
pv = 1000
r = 0.08
n = 20
rate1 = 0.001
rate2 = 0.08
nper = np.arange(0, n)
examp4 = np.fv(0, nper, pmt, fv, when='begin')
examp5 = np.fv(rate1, nper, pmt, fv, when='begin')
    def plot(self,
             title,
             xlim=None,
             ylim=None,
             plot_type='amplitude',
             show_limits=False,
             show_ssa_limits=False,
             beam_on=False,
             annotate=False,
             beam_start=20,
             beam_end=22,
             hide_fwd=False):
        """ Plot signals of interest in RF Station time-series simulation.
        Supports amplitude, phase and cartesian plots with a number of options.
        """
        fund_k_probe = self.fund_mode_dict['k_probe']
        fund_k_drive = self.fund_mode_dict['k_drive']
        fund_k_em = self.fund_mode_dict['k_em']
        fund_k_beam = self.fund_mode_dict['k_beam']

        # Amplitude
        if plot_type == 'amplitude':
            if hide_fwd == False:
                plt.plot(self.trang * 1e3,
                         np.abs(self.E_fwd) * fund_k_drive,
                         '-',
                         label=r'Forward $\left(\vec E_{\rm fwd}\right)$',
                         linewidth=2)
                plt.plot(self.trang * 1e3,
                         np.abs(self.E_reverse / fund_k_em),
                         '-',
                         label=r'Reverse $\left(\vec E_{\rm reverse}\right)$',
                         linewidth=2)
            plt.plot(self.trang * 1e3,
                     np.abs(self.cav_v),
                     '-',
                     label=r'Cavity Field',
                     linewidth=2,
                     color='r')
            plt.plot(self.trang * 1e3,
                     np.abs(self.set_point / fund_k_probe),
                     '-',
                     label=r'Set-point $\left(\vec E_{\rm sp}\right)$',
                     linewidth=2,
                     color='c')
            # Y label
            plt.ylabel('Amplitude [V]', fontsize=30)

            if show_limits == True:
                plt.plot(self.trang * 1e3,
                         np.abs(self.set_point / fund_k_probe) * (1 + 1e-4),
                         label='Upper limit',
                         linewidth=2,
                         color='m')
                plt.plot(self.trang * 1e3,
                         np.abs(self.set_point / fund_k_probe) * (1 - 1e-4),
                         label='Lower limit',
                         linewidth=2,
                         color='y')
                plt.axhspan(16e6 / 1.00005,
                            16e6 * 1.00005,
                            color='blue',
                            alpha=0.2)

            if show_ssa_limits == True:
                # If SSA noise were a sine wave, this would be the amplitude limit
                ssa_limit = 4e-2 * fund_k_drive * np.sqrt(3.8e3)
                plt.plot(self.trang * 1e3,
                         np.abs(self.set_point / fund_k_probe) +
                         ssa_limit / np.sqrt(2) / 2,
                         label='SSA Upper (RMS) limit',
                         linewidth=2,
                         color='m')
                plt.plot(self.trang * 1e3,
                         np.abs(self.set_point / fund_k_probe) -
                         ssa_limit / np.sqrt(2) / 2,
                         label='SSA Lower (RMS) limit',
                         linewidth=2,
                         color='y')

                low_index = np.where(self.trang == 20e-3)[0][0]
                high_index = np.where(self.trang == 49.9e-3)[0][0]

                ssa_std = np.std(self.E_fwd[low_index:high_index] *
                                 fund_k_drive)
                ssa_std_percent = 100 * ssa_std / (fund_k_drive *
                                                   np.sqrt(3.8e3))
                ssa_std_text = r'$\sigma_{\rm SSA}\,=\,$' + '%.2f %% RMS' % (
                    np.abs(ssa_std_percent))
                plt.text(35,
                         13e6,
                         ssa_std_text,
                         verticalalignment='top',
                         fontsize=30)

        # Phase
        if plot_type == 'phase':
            plt.plot(self.trang * 1e3,
                     np.angle(self.cav_v, deg=True),
                     '-',
                     label=r'Cavity Field',
                     linewidth=2,
                     color='r')
            plt.plot(self.trang * 1e3,
                     np.angle(self.set_point / fund_k_probe, deg=True),
                     label=r'Set-point $\left(\vec E_{\rm sp}\right)$',
                     linewidth=2,
                     color='c')
            if show_limits:
                plt.plot(self.trang * 1e3,
                         1e-2 * np.ones(len(self.trang)),
                         label='Upper limit',
                         linewidth=2,
                         color='m')
                plt.plot(self.trang * 1e3,
                         -1e-2 * np.ones(len(self.trang)),
                         label='Lower limit',
                         linewidth=2,
                         color='y')
                plt.axhspan(-4e-3, 4e-3, color='blue', alpha=0.2)

            # Y label
            plt.ylabel('Phase [degrees]', fontsize=30)

        # Cartesian coordinates
        if plot_type == 'cartesian':
            plt.plot(self.trang * 1e3,
                     np.real(self.E_fwd) * fund_k_drive,
                     '-',
                     label=r'Forward $\Re \left(\vec E_{\rm fwd}\right)$',
                     linewidth=2)
            plt.plot(self.trang * 1e3,
                     np.imag(self.E_fwd) * fund_k_drive,
                     '-',
                     label=r'Forward $\Im \left(\vec E_{\rm fwd}\right)$',
                     linewidth=2)
            plt.plot(self.trang * 1e3,
                     np.real(self.cav_v),
                     '-',
                     label=r'$\Re$ Cavity Field',
                     linewidth=2)
            plt.plot(self.trang * 1e3,
                     np.imag(self.cav_v),
                     '-',
                     label=r'$\Im$ Cavity Field',
                     linewidth=2)

        # Add annotation for a very specific plot
        if annotate:
            delta_E_fwd_text = r'$\Delta \left| \vec E_{\rm fwd} \right| \approx \,$' + \
                '%.d MV' % (round(np.abs(fund_k_beam)*100e-6*1e-6))
            plt.annotate(delta_E_fwd_text, xy=(21.05, 18e6), fontsize=25)
            index_of_21 = np.where(self.trang == 21e-3)
            plt.annotate(s='',
                         xy=(21,
                             np.abs(self.E_fwd[index_of_21]) * fund_k_drive),
                         xytext=(21, np.abs(self.cav_v[index_of_21])),
                         arrowprops=dict(arrowstyle='<->'))

        # Add clear red shade if beam current is on to highlight the location of the beam train
        if beam_on:
            plt.axvspan(beam_start, beam_end, color='red', alpha=0.2)

        plt.xticks(fontsize=20)
        plt.yticks(fontsize=20)
        plt.title(title, fontsize=40, y=1.02)
        plt.xlabel('Time [ms]', fontsize=30)

        if xlim:
            plt.xlim(xlim)

        if ylim:
            plt.ylim(ylim)

        plt.rc('font', **{'size': 20})
        plt.legend(loc='upper right')

        plt.show()
def main():
    name_to_labels = load_labels()
    for k,v in name_to_labels.items():
        name_to_labels[k] = [0 if i==0 else 1 for i in v]
    name_to_feats = {}
    for k,v in name_to_labels.items():
        name_to_feats[k] = []
        for i,_ in enumerate(v):
            # draws 1 index
            frame = load_raw(k, i+1)
            if frame is not None:
                feat = features(frame, draw_num=i)
            else:
                feat = None
            name_to_feats[k].append(feat)

    #max_feats = max([len(x) for x in name_to_feats.values()[0] if x is not None])
    #for k,v in name_to_feats.items():
        #for i in range(len(v)):
            #v[i] = v[i] + [0]*(max_feats - len(v[i]))


    # fix the features where can't make with zeros

    accum = []
    for x in trange(10000):
        patients = name_to_labels.keys()
        n_train = int(len(patients) * .8)
        train_pat = np.random.choice(patients, size=n_train).tolist()
        test_pat = list(set(patients) - set(train_pat))

        trX, trY = gather_feats_labels(train_pat, name_to_feats, name_to_labels)
        teX, teY = gather_feats_labels(test_pat, name_to_feats, name_to_labels)
        if len(np.unique(teY)) == 1:
            continue
        if len(np.unique(trY)) == 1:
            continue
        X, Y = gather_feats_labels(patients, name_to_feats, name_to_labels)
        idx = np.arange(len(X))
        np.random.shuffle(idx)
        spt = int(len(X)*.8)
        trX = X[idx[:spt]]
        teX = X[idx[spt:]]
        trY = Y[idx[:spt]]
        teY = Y[idx[spt:]]


        trPY, tePY = shuffle_train(trX, trY, teX)

        heldout_auc = roc_auc_score(teY, tePY)
        train_auc = roc_auc_score(trY, trPY)

        fpr, tpr, _ = roc_curve(teY, tePY)
        #plt.plot(fpr, tpr)
        accum.append([heldout_auc, train_auc])
    print np.mean(accum, axis=0), np.var(accum, axis=0)
    accum = np.asarray(accum)
    plt.subplot(2,1,1)
    plt.title("Histogram of AUC from ML based model")
    plt.hist(accum[:, 0], bins=100, normed=True, range=[0,1])
    plt.xlabel("Validation AUC")
    plt.subplot(2,1,2)
    plt.hist(accum[:, 1], bins=100, normed=True, range=[0,1])
    plt.xlim([0, 1])
    plt.xlabel("Train AUC")
    plt.show()
Example #35
def plot_2d_dist2(x,
                  y,
                  xlim,
                  ylim,
                  nxbins,
                  nybins,
                  cmin=1.e-4,
                  cmax=1.0,
                  log=False,
                  weights=None,
                  xlabel='x',
                  ylabel='y',
                  clevs=None,
                  smooth=None,
                  fig_setup=None,
                  savefig=None):
    """
    log = whether x and y are log10 quantities; if True they are converted back (10**x, 10**y) so the caller can show them on log-scaled axes
    """
    if fig_setup is None:
        fig, ax = plt.subplots(figsize=(2.5, 2.5))
        plt.ylabel(ylabel)
        plt.xlabel(xlabel)
        plt.xlim(xlim[0], xlim[1])
        plt.ylim(ylim[0], ylim[1])
    else:
        ax = fig_setup

    if xlim[1] < 0.: ax.invert_xaxis()

    if weights is None: weights = np.ones_like(x)
    H, xbins, ybins = np.histogram2d(x,
                                     y,
                                     weights=weights,
                                     bins=(np.linspace(xlim[0], xlim[1],
                                                       nxbins),
                                           np.linspace(ylim[0], ylim[1],
                                                       nybins)))

    H = np.rot90(H)
    H = np.flipud(H)

    #X,Y = np.meshgrid(xbins,ybins)
    X, Y = np.meshgrid(xbins[:-1], ybins[:-1])
    if smooth is not None:
        if (np.size(smooth) < 2):
            raise Exception(
                "smooth needs to be an array of size 2 containing 0=SG window size, 1=SG poly order"
            )
        H = sgolay2d(H, window_size=smooth[0], order=smooth[1])

    H = H / np.sum(H)
    Hmask = np.ma.masked_where(H == 0, H)

    if log:
        X = np.power(10., X)
        Y = np.power(10., Y)

    pcol = ax.pcolormesh(X,
                         Y, (Hmask),
                         vmin=cmin * np.max(Hmask),
                         vmax=cmax * np.max(Hmask),
                         cmap=plt.cm.BuPu,
                         norm=LogNorm(),
                         linewidth=0.,
                         rasterized=True)
    pcol.set_edgecolor('face')

    if clevs is not None:
        lvls = []
        for cld in clevs:
            sig = opt.brentq(conf_interval, 0., 1., args=(H, cld))
            lvls.append(sig)

        ax.contour(X,
                   Y,
                   H,
                   linewidths=(1.0, 0.75, 0.5, 0.25),
                   colors='black',
                   levels=sorted(lvls),
                   norm=LogNorm(),
                   extent=[xbins[0], xbins[-1], ybins[0], ybins[-1]])
    if savefig:
        plt.savefig(savefig, bbox_inches='tight')
    if fig_setup is None:
        plt.show()
    return
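
conf_interval, used with brentq above to turn confidence levels into contour thresholds, is not shown; a common sketch (an assumption about the author's helper) is the enclosed-fraction residual on the normalized histogram:

def conf_interval(x, H, clevel):
    # Fraction of the normalized 2-D histogram enclosed above threshold x,
    # minus the requested confidence level; brentq finds the root in x.
    return H[H > x].sum() - clevel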
Example #36
    transit_duration, 
    [planet, firstmoon, secondmoon])


# Output information
print 'TTV amplitude =', numpy.amax(ttv_array), \
    '[min] = ', numpy.amax(ttv_array) * 60, '[sec]'
print 'TDV amplitude =', numpy.amax(tdv_array), \
    '[min] = ', numpy.amax(tdv_array) * 60, '[sec]'

ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)

# Fix axes for comparison with eccentric moon
plt.xlim(-0.4, +0.4)
plt.ylim(-1, +1)


plt.savefig("fig_system_ix.eps", bbox_inches = 'tight')

def gradient_descent(f, init_x, lr, step_num):
    x = init_x
    x_history = []

    for i in range(step_num):
        x_history.append(x.copy())
        grad = numerical_gradient(f, x)  # numerical differentiation
        x -= lr * grad

    return x, np.array(x_history)


def function_2(x):
    return x[0]**2 + x[1]**2


init_x = np.array([-3.0, 4.0])

lr = 0.1
step_num = 20
x, x_history = gradient_descent(function_2, init_x, lr=lr, step_num=step_num)

plt.plot([-5, 5], [0, 0], '--b')
plt.plot([0, 0], [-5, 5], '--b')
plt.plot(x_history[:, 0], x_history[:, 1], 'o')

plt.xlim(-3.5, 3.5)
plt.ylim(-4.5, 4.5)
plt.xlabel("x0")
plt.ylabel("x1")
plt.show()
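
numerical_gradient is called above but not defined in this excerpt; a standard central-difference sketch is:

import numpy as np

def numerical_gradient(f, x, h=1e-4):
    # Central-difference approximation of the gradient of f at the 1-D array x.
    grad = np.zeros_like(x)
    for i in range(x.size):
        tmp = x[i]
        x[i] = tmp + h
        fxh1 = f(x)
        x[i] = tmp - h
        fxh2 = f(x)
        grad[i] = (fxh1 - fxh2) / (2 * h)
        x[i] = tmp
    return grad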
Example #38
def plot_cluster_expression(out,data1,data2,donor,gene, image):

    # function for setting the colors of the box plots pairs
    def setBoxColors(bp):
        setp(bp['boxes'][0], color='blue')
        setp(bp['caps'][0], color='blue')
        setp(bp['caps'][1], color='blue')
        setp(bp['whiskers'][0], color='blue')
        setp(bp['whiskers'][1], color='blue')
        setp(bp['fliers'][0], color='blue')
        setp(bp['fliers'][1], color='blue')
        setp(bp['medians'][0], color='blue')

        setp(bp['boxes'][1], color='red')
        setp(bp['caps'][2], color='red')
        setp(bp['caps'][3], color='red')
        setp(bp['whiskers'][2], color='red')
        setp(bp['whiskers'][3], color='red')
        setp(bp['fliers'][2], color='red')
        setp(bp['fliers'][3], color='red')
        setp(bp['medians'][1], color='red')

    N_probes=data1.shape[0]

    fig = figure()
    ax = axes()
    hold(True)

    s=1
    f=2
    p_value=[]
    t_stat=[]
    ticks=[]
    for i in range(N_probes):
        t,p=stats.ttest_ind(data1[i,:], data2[i,:])
        bp = boxplot([data1[i,:],data2[i,:]], positions = [s, f], widths = 0.6)
        setBoxColors(bp)
        ticks.append( (s+f)/2. )
        s+=3
        f+=3
        p_value.append(p)
        t_stat.append(t)

    hB, = plot([1,1],'b-')
    hR, = plot([1,1],'r-')

    xlim(0,f+2)
    ylim(2,20)
    legend((hB, hR),('Inside', 'Outside'))

    for i in range(N_probes):
        text(f+3,10-i,'Probe #{}: p-value={}'.format(i+1,np.round(p_value[i],3) ) )
    ax.set_xticklabels(['probe #{}'.format(j) for j in range(1,N_probes+1)])
    ax.set_xticks(ticks)
    title('Donor {}, Allen Brain expression of gene {} inside/outside clusters formed in {} image'.format(donor, gene,image))

    hB.set_visible(False)
    hR.set_visible(False)
    try:
        savefig(os.path.join(out,donor+"_"+gene+".png") )
    except:
        savefig(os.path.join(out,donor+"_"+gene+".svg") )
Example #39
scores = np.zeros(len(densities))
trials = 10
for i, density in enumerate(densities):
    for n in xrange(trials):
        window = np.empty(sh)
        for f in xrange(sh[-1]):
            #window = (np.random.random(sh) < density).astype(np.float64)
            window[...,f] = (np.random.random(sh[:-1]) < back[0,0,f] * density).astype(np.float64)
        #print "Density", density, window.sum()/np.prod(window.shape)

        #contribution_map = np.zeros(sh[:-1]) 
        data = \
            window * np.log(kernels[mixcomp]) + \
            (1.0-window) * np.log(1.0 - kernels[mixcomp]) + \
            (-1) * (1.0-window) * np.log(1.0 - back[0,0]) + \
            (-1) * window * np.log(back[0,0])
        #contribution_map += data

        scores[i] += data.sum()#contribution_map.sum()

scores /= trials
    

plt.plot(densities, scores)
plt.xlim((0, 1))
plt.xlabel('Feature density')
plt.ylabel('Log likelihood ratio')
plt.title('Scores for random feature activity')
plt.show()

Example #40
def plot(experiment_path,
         save_dir="/tmp/",
         save_name="results",
         limit_steps=None):

    fig = plt.figure(figsize=(20, 10))

    if len(glob(os.path.join(experiment_path, "train/*monitor*"))) != 0:

        exps = glob(experiment_path)
        print(exps)

        # Get data
        df = load_results(os.path.join(experiment_path, "train"))
        roll = 5
        rdf = df.rolling(5)
        df['steps'] = df['l'].cumsum() / 1000000

        # Plots
        ax = plt.subplot(1, 1, 1)
        df.rolling(roll).mean().plot('steps',
                                     'r',
                                     style='-',
                                     ax=ax,
                                     legend=False)
        rdf.max().plot('steps',
                       'r',
                       style='-',
                       ax=ax,
                       legend=False,
                       color="#28B463",
                       alpha=0.65)
        rdf.min().plot('steps',
                       'r',
                       style='-',
                       ax=ax,
                       legend=False,
                       color="#F39C12",
                       alpha=0.65)

        # X axis
        gap = 0.5
        ax.set_xticks(
            np.arange(0, ((df['steps'].iloc[-1] // gap) + 1) * gap, gap))
        ax.set_xlabel('Num steps (M)')
        if limit_steps: plt.xlim(0, limit_steps)

        # Y axis
        gap = 25
        ax.set_yticks(
            np.arange(((df['r'].min() // gap) - 1) * gap,
                      ((df['r'].max() // gap) + 1) * gap, gap))
        ax.set_ylabel('Reward')
        ax.grid(True)

    fig.subplots_adjust(left=0.1,
                        bottom=0.1,
                        right=0.9,
                        top=0.9,
                        wspace=0.1,
                        hspace=0.2)

    # Save figure
    ax.get_figure().savefig(os.path.join(save_dir, save_name) + ".jpg")
    plt.clf()
import numpy as np
import matplotlib.pylab as plt

dis = np.loadtxt('dis.txt')

time = dis[:1900, 0]
disp = dis[:1900, 1]

plt.figure()
plt.plot(time, disp)
plt.grid()
plt.xlabel("Time (second) ")
plt.xlim([0, 20])
plt.ylabel("Displacements (meter)  ")
plt.savefig('input_motion.pdf', bbox_inches='tight')
Example #42
import numpy as np


for fileID in [2]:
    with open( dirIn + allfiles[fileID]) as f:
        lines = (line for line in f if not line.startswith('#'))
        allCl = np.loadtxt(lines, skiprows=1)

    l = allCl[:, lID[fileID] ]
    Cl = allCl[:, ClID[fileID]]
    emax = allCl[:, emaxID[fileID]]
    emin = allCl[:, eminID[fileID]]

    print l.shape


				
    plt.figure(99)
    plt.errorbar(l, Cl, yerr=[emax, emin], fmt='x', label = allfiles[fileID][2:-4], alpha = 0.8, ms=1)


#plt.yscale('log')
#plt.xscale('log')
plt.xlim([0,3500])
plt.ylim([-1000,10000])
plt.legend()
#plt.show()

#print allCl.shape
Example #43
def longStats(af = 4, filter=None):
    dataDirectory = "/data/routeviews/archive.routeviews.org/route-views.linx/bgpdata/"
    
    space = 1
    yearRange = range(2004, 2017)
    #monthRange = range(1,13)
    monthRange = [6] #range(1,13)
    day = 15
    dateRange = []

    tier1 = {"3356":[], "1299":[], "174":[] ,"2914":[],"3257":[]}#, "6453":[], "3491":[], "701":[], "1239":[], "6762":[]}

    # Find the first RIB files for each year
    ribFiles = []
    for ye in yearRange:
        for month in monthRange:
            ribs = glob.glob(dataDirectory+"{ye}.{mo:02d}/RIBS/rib.{ye}{mo:02d}{da:02d}.*.bz2".format(ye=ye, mo=month, da=day))
            ribs.sort()
            if len(ribs) < 1:
                continue
            ribFiles.append(((ye,month,day),ribs[0]))
            dateRange.append(datetime.datetime(ye,month,day))

    outDir = "../results/longStats_space%s_ipv%s/" % (space, af)
    try:
        os.makedirs(os.path.dirname(outDir))
    except OSError as exc: # Guard against race condition
        if exc.errno != errno.EEXIST:
            raise

    plt.figure()
    ccmap = mpl.cm.get_cmap('copper_r')
    # Using contourf to provide my colorbar info, then clearing the figure
    Z = [[0,0],[0,0]]
    CS3 = plt.contourf(Z, yearRange, cmap=ccmap)
    plt.clf()
    for i, (date, ribFile) in enumerate(ribFiles):
        print date
        if filter is None:
            centralityFile = outDir+"/%s%02d%02d_af%s.pickle" % (date[0], date[1], date[2], af)
            fList = None
        else:
            centralityFile = outDir+"/%s%02d%02d_AS%s_af%s.pickle" % (date[0], date[1], date[2],filter, af)
            fList = [filter]

        if not os.path.exists(centralityFile):
            rtree, _ = ashash.readrib(ribFile, space, af, filterAS=fList) 
            asAggProb, asProb, _ = ashash.computeCentrality(rtree.search_exact("0.0.0.0/0").data, space, filterAS=filter)
            pickle.dump((asAggProb, asProb), open(centralityFile, "wb"))
        else:
            asAggProb, asProb = pickle.load(open(centralityFile,"rb"))

        if asAggProb is None or len(asAggProb) < 1:
            continue

        if filter is None and af==4:
            for k,v in tier1.iteritems():
                v.append(asAggProb[str(k)])

        if not filter is None:
            del asAggProb[str(filter)]


        sortedAS = sorted(asAggProb, key=lambda k: asAggProb[k], reverse=True)
        # Use a separate counter so the year index i (from enumerate) is not
        # clobbered before it is used to pick the colormap value below
        rank = 0
        while rank < len(sortedAS) and asAggProb[sortedAS[rank]] > .05:
            print "%s: %s" % (sortedAS[rank], asAggProb[sortedAS[rank]])
            rank += 1

        eccdf(asAggProb.values(), lw=1.3, label=date[0], c=ccmap(i/float(len(ribFiles))))
        # print date
        # maxKey = max(asAggProb, key=asAggProb.get)
        # print "AS%s = %s" % (maxKey, asAggProb[maxKey]) 

    plt.grid(True)
    #plt.legend(loc="right")
    plt.colorbar(CS3)
    plt.xscale("log")
    plt.yscale("log")
    #plt.yscale("log")
    # plt.xlim([10**-8, 10**-2])
    plt.xlim([10**-7, 1.1])
    # plt.ylim([10**-3, 1.1])
    plt.xlabel("AS hegemony")
    plt.ylabel("CCDF")
    plt.tight_layout()
    if filter is None:
        plt.title("IPv%s" % af)
        plt.savefig(centralityFile.rpartition("/")[0]+"/hegemonyLongitudinal_af%s.eps" % af)
    else:
        plt.title("AS%s (IPv%s)" % (filter, af))
        plt.savefig(centralityFile.rpartition("/")[0]+"/hegemonyLongitudinal_AS%s_af%s.eps" % (filter, af))

    if filter is None and af==4:
        fig = plt.figure(figsize=(10,3))
        for k,v in tier1.iteritems():
            plt.plot(dateRange, v, label="AS"+k)
        plt.ylim([0,0.3])
        plt.grid(True)
        plt.ylabel("AS hegemony")
        plt.xlabel("Time")
        plt.legend(loc="center", bbox_to_anchor=(0.5, 1), ncol=len(tier1))
        plt.tight_layout()
        plt.savefig(centralityFile.rpartition("/")[0]+"/tier1.eps")
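The CS3 = plt.contourf(Z, yearRange, cmap=ccmap) call above exists only to create a mappable whose levels span the years; the dummy contours are wiped with plt.clf() and the mappable is reused later by plt.colorbar(CS3). A minimal standalone sketch of that trick (not part of the original script):

import matplotlib as mpl
import matplotlib.pyplot as plt

years = list(range(2004, 2017))
cmap = mpl.cm.copper_r

# Dummy filled contour: its only purpose is to carry the year levels for the colorbar.
dummy = plt.contourf([[0, 0], [0, 0]], years, cmap=cmap)
plt.clf()

for i, year in enumerate(years):
    plt.plot([0, 1], [i, i + 1], c=cmap(i / float(len(years))))
plt.colorbar(dummy, label='year')
plt.show()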
Example #44
0
left = 0.12   # the left side of the subplots of the figure (value assumed; missing from the original snippet)
right = 0.97  # the right side of the subplots of the figure
bottom = 0.1  # the bottom of the subplots of the figure
top = 0.96  # the top of the subplots of the figure
wspace = 0.2  # the amount of width reserved for blank space between subplots
hspace = 0.15  # the amount of height reserved for white space between subplots
plt.subplots_adjust(left=left,
                    bottom=bottom,
                    right=right,
                    top=top,
                    wspace=wspace,
                    hspace=hspace)

ax1 = plt.subplot(1, 1, 1)
plt.plot(time_ms, disI_2p2 / 1000.0, color='red', label='2.2kV')
plt.plot(time_ms, disI_1p8 / 1000.0, color='orange', label='1.8kV')
plt.xticks(fontsize=12)
plt.xlabel(r't [$\mu$s]', fontsize=16)
plt.xlim(0, 198)
plt.yticks(fontsize=12)
plt.ylabel('Discharge Current [kA]', fontsize=16)
plt.ylim(0, 80)
#plt.vlines(0,0,80,color='gray',linestyle='dotted',linewidth=3.5)

leg = plt.legend(loc='upper right', fontsize=12, frameon=False, handlelength=5)

savefilename = 'Discharge_current_compare_forAPSDPP18.png'
#savefilename='Discharge_current_'+date+'_shot'+str(shot)+'_forAPSDPP18.png'
savefile = os.path.normpath(savefilename)
#plt.savefig(savefile,dpi=600,facecolor='w',edgecolor='k')
#plt.clf()
#plt.close()
Example #45
0
def peerSensitivity():
    space = 1
    af = 4
    allasCount = {}
    resultsFile = "../results/peerSensitivity/KLdiv_%s.pickle"
    collectorsDataFile = "../results/peerSensitivity/collectorData.pickle"
    allPeersFile = "../results/peerSensitivity/allPeersDist.pickle"
    collectorsDist = []
    results = defaultdict(list)

    if not os.path.exists(resultsFile % "Betweenness"):
        ribFiles = glob.glob("/data/routeviews/archive.routeviews.org/*/*/RIBS/rib.20160601.0000.bz2")
        ribFiles.extend(glob.glob("/data/routeviews/archive.routeviews.org/*/*/*/RIBS/rib.20160601.0000.bz2"))
        ribFiles.extend(glob.glob("/data/ris/*/*/bview.20160601.0000.gz"))
        ribFiles.append("/data/bgpmon/ribs/201606/ribs") 
        
        for i, ribFile in enumerate(ribFiles):
            words = ribFile.split("/")
            if "routeviews" in ribFile:
                if words[-4] == "route-views3":
                    label = "rv3"
                elif words[-5] == "archive.routeviews.org" and words[-4] == "bgpdata":
                    label = "rv2"
                elif not "." in words[-5] and words[-5].startswith("route-views"):
                    label = "rv"+words[-5][-1]
                else:
                    label = words[-5].split(".")[-1]
            elif "ris" in ribFile:
                label = words[-3]
            else:
                label = "bgpmon"

            asCountFile = "../results/peerSensitivity/20160601.0000_asCount%s.pickle" % (i)
            if not os.path.exists(asCountFile):
                rtree, _ = ashash.readrib(ribFile, space, af) 
                asCount = rtree.search_exact("0.0.0.0/0").data
                asHegemony, _, nbPeers = ashash.computeCentrality(asCount, space)
                # asBetweenness = ashash.computeBetweenness(asCount, space)
                pickle.dump((asCount, asHegemony, nbPeers), open(asCountFile, "wb"))
            else:
                asCount, asHegemony, nbPeers = pickle.load(open(asCountFile,"rb"))


            collectorsDist.append( (label, nbPeers, asHegemony) )

            print "%s: %s peers" % (label, len(asCount)) 
            for peer, count in asCount.iteritems():
                if count["totalCount"]>2000000000:
                    if not peer in allasCount:
                        allasCount[peer] = count
                    else:
                        print "Warning: peer %s is observed multiple times (%s)" % (peer, ribFile)

        asHegemonyRef, _, nbPeers = ashash.computeCentrality(allasCount, space)
        asBetweennessRef, _, _ = ashash.computeBetweenness(allasCount, space)
        pickle.dump((asHegemonyRef, asBetweennessRef, nbPeers), open(allPeersFile,"wb"))
        

        for metricLabel, ref, computeMetric in [("Hegemony", asHegemonyRef, ashash.computeCentrality),
                ("Betweenness", asBetweennessRef, ashash.computeBetweenness)]:

            if not os.path.exists(resultsFile % metricLabel):
                # Remove AS with a score of 0.0
                toremove = [asn for asn, score in ref.iteritems() if score==0.0]
                for asn in toremove:
                    del ref[asn]

                minVal = min(ref.values())

                nbPeersList = range(0, len(allasCount), 10)
                nbPeersList[0] = 1

                for nbPeers in nbPeersList:
                    tmp = []
                    for _ in range(10):

                        # Randomly select peers
                        peersIndex = random.sample(range(len(allasCount)), nbPeers)

                        asCount = {}
                        for p in peersIndex:
                            asCount[allasCount.keys()[p]] = allasCount.values()[p]

                        asMetric, _, _ = computeMetric(asCount, space)

                        # Remove AS with a score == 0.0
                        toremove = [asn for asn, score in asMetric.iteritems() if score==0.0]
                        if toremove is not None:
                            for asn in toremove:
                                del asMetric[asn]

                        # Set the same number of ASN for both distributions
                        missingAS = set(ref.keys()).difference(asMetric.keys())
                        if missingAS is not None:
                            for asn in missingAS:
                                asMetric[asn] = minVal

                        # Compute the KL-divergence
                        dist = [asMetric[asn] for asn in ref.keys()]
                        kldiv = sps.entropy(dist, ref.values())
                        tmp.append(kldiv)


                    results[metricLabel].append(tmp)
                    print tmp

                # save final results
                pickle.dump((nbPeersList, results[metricLabel]),open(resultsFile % metricLabel,"wb"))
                pickle.dump(collectorsDist, open(collectorsDataFile,"wb"))
            else:
                (nbPeersList, results[metricLabel]) = pickle.load(open(resultsFile % metricLabel,"rb"))
                collectorsDist = pickle.load(open(collectorsDataFile,"rb"))

    else:
        for metricLabel in ["Hegemony", "Betweenness"]:
            nbPeersList, results[metricLabel] = pickle.load(open(resultsFile % metricLabel,"rb"))
        collectorsDist = pickle.load(open(collectorsDataFile,"rb"))
        asHegemonyRef, asBetweennessRef, allFullFeedPeers = pickle.load(open(allPeersFile,"rb"))
        #return asHegemonyRef

    def plotRef(references, legendFmt="%s", alpha=1.0):
        for asDistRef, metricLabel, color in references:
            m = np.mean(results[metricLabel][1:], axis=1)
            s = np.std(results[metricLabel][1:], axis=1)
            # mi = m-np.min(results[metricLabel][1:], axis=1)
            # ma = np.max(results[metricLabel][1:], axis=1)-m
            # mi, ma = sms.DescrStatsW(results[metricLabel][1:]).tconfint_mean(alpha=0.1)
            mi = np.min(results[metricLabel][1:], axis=1)
            ma = np.max(results[metricLabel][1:], axis=1)
            x = nbPeersList[1:]

            plt.fill_between(x, mi, ma, facecolor=color, alpha=alpha*0.5)
            # plt.plot(x, m,"-+", ms=4, color="0.6", label="Randomly selected") 
            # plt.plot(x, m,"-+", ms=4, color="0.6", label="Random peers (%s)" % metricLabel) 
            try:
                plt.plot(x, m,"-o", ms=3, label=legendFmt % metricLabel, color=color, alpha=alpha) 
            except TypeError:
                plt.plot(x, m,"-o", ms=3, label=legendFmt, color=color, alpha=alpha) 
            # plt.errorbar(x,m, [mi, ma], fmt="C3.", ms=4)
            plt.xlabel("Number of peers")
            plt.ylabel("KL divergence")
            # plt.yscale("log")
            plt.legend()
            plt.tight_layout()
    
    # Compare betweenness and hegemony
    plt.figure()
    plotRef([(asHegemonyRef, "Hegemony", "C0"), (asBetweennessRef, "Betweenness", "C5")])
    plt.ylim([0.00001, 1])
    plt.xscale("log")
    plt.savefig("../results/peerSensitivity/meanKL_%s.pdf" % metricLabel)

    # Compare collectors and random sample (hegemony):
    # Remove AS with a score of 0.0
    toremove = [asn for asn, score in asHegemonyRef.iteritems() if score==0.0]
    for asn in toremove:
        del asHegemonyRef[asn]
    minVal = min(asHegemonyRef.values())
    plt.figure()
    plotRef([(asHegemonyRef, "Hegemony", "C0")], legendFmt="Random peers", alpha=0.5)
    plt.xlabel("Number of peers")
    plt.ylabel("KL divergence")
    for collectorLabel, nbPeers, asHegemony in collectorsDist:
        if asHegemony is None :
            print "warning: ignore collector %s" % collectorLabel
            continue

        # Remove AS with a score == 0.0
        toremove = [asn for asn, score in asHegemony.iteritems() if score==0.0]
        if toremove is not None:
            for asn in toremove:
                del asHegemony[asn]

        # Set the same number of ASN for both distributions
        missingAS = set(asHegemonyRef.keys()).difference(asHegemony.keys())
        if missingAS is not None:
            for asn in missingAS:
                asHegemony[asn] = minVal

        # Compute the KL-divergence
        dist = [asHegemony[asn] for asn in asHegemonyRef.keys()]
        kldiv = sps.entropy(dist, asHegemonyRef.values())
        print "%s:\t %s peers \t  %s " % (collectorLabel, nbPeers, kldiv)
        if kldiv>0.4 :
            continue
        if collectorLabel.startswith("rrc"):
            plt.plot(nbPeers, kldiv,"C1x", label="RIS")
            collectorLabel = collectorLabel.replace("rrc","")
        elif collectorLabel == "bgpmon":
            plt.plot(nbPeers, kldiv,"C3^")
        else:
            plt.plot(nbPeers, kldiv,"C3+", label="Route Views")
        if kldiv<1 or nbPeers>10:
            plt.text(nbPeers+0.005, kldiv+0.005, collectorLabel, fontsize=8)

    # plt.yscale("log")
    # plt.xscale("log")
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    plt.legend(by_label.values(), by_label.keys())
    plt.ylim([0.00001, 0.4])
    plt.xlim([9, 50])
    plt.tight_layout()
    plt.savefig("../results/peerSensitivity/collectorDiversity.pdf")


    return (nbPeersList, results)
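In the collector scatter above, every plt.plot call repeats the label 'RIS' or 'Route Views', so a plain plt.legend() would show one entry per point; the OrderedDict over (label, handle) pairs keeps only the first handle per label. The pattern in isolation, as a sketch with dummy points:

from collections import OrderedDict

import matplotlib.pyplot as plt

for x, y in [(10, 0.05), (20, 0.02), (30, 0.01)]:
    plt.plot(x, y, 'C1x', label='RIS')          # same label repeated
for x, y in [(15, 0.04), (25, 0.03)]:
    plt.plot(x, y, 'C3+', label='Route Views')  # same label repeated

handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))    # first handle per distinct label
plt.legend(by_label.values(), by_label.keys())
plt.show()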
Example #46
0
            zh = float(line[3])

    f.close()

    x = x - xh
    y = y - yh
    z = z - zh

    #plt.figure(figsize=(20,6))

    plt.subplot(131)
    #plt.scatter(x,y,c=colors[j], marker=shapes[j])
    plt.hist2d(x, y, bins=(xbins, xbins), norm=LogNorm())
    plt.xlabel(r'$x$ (kpc/$h$)', fontsize=18)
    plt.ylabel(r'$y$ (kpc/$h$)', fontsize=18)
    plt.xlim(-200, 200)
    plt.ylim(-200, 200)

    plt.subplot(132)
    #plt.scatter(x,z,c=colors[j], marker=shapes[j])
    plt.hist2d(x, z, bins=(xbins, xbins), norm=LogNorm())
    plt.xlabel(r'$x$ (kpc/$h$)', fontsize=18)
    plt.ylabel(r'$z$ (kpc/$h$)', fontsize=18)
    plt.xlim(-200, 200)
    plt.ylim(-200, 200)

    plt.subplot(133)
    #plt.scatter(y,z,c=colors[j], marker=shapes[j])
    plt.hist2d(y, z, bins=(xbins, xbins), norm=LogNorm())
    plt.xlabel(r'$y$ (kpc/$h$)', fontsize=18)
    plt.ylabel(r'$z$ (kpc/$h$)', fontsize=18)
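The three panels above use plt.hist2d with a logarithmic color norm so that dense centres and sparse outskirts are both visible. A self-contained sketch of that combination with synthetic points (the x/y/z columns above come from the original data file):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

rng = np.random.default_rng(0)
x = rng.normal(0.0, 50.0, 100000)
y = rng.normal(0.0, 50.0, 100000)

plt.hist2d(x, y, bins=(100, 100), norm=LogNorm())  # log color scale spreads the dynamic range
plt.colorbar(label='counts per bin')
plt.xlim(-200, 200)
plt.ylim(-200, 200)
plt.show()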
Example #47
0
    plt.subplots(figsize=(20, 8))
    #make 5000 noise and 1000 of each x sample
    N_samples = 1000
    noise = np.random.randn(5 * N_samples, noise_dim).astype('float32')
    x_gen = np.repeat(xgen, 1000)
    x_gen = x_gen.reshape(5000, 1)
    #plug into posterior
    z_samples = posterior(x_gen, noise)
    z_samples = tf.reshape(z_samples, [xgen.shape[0], N_samples, 2]).eval()
    for i in range(5):
        plt.subplot(2, 5, i + 1)
        sns.kdeplot(z_samples[i, :, 0], z_samples[i, :, 1], cmap='Greens')
        plt.axis('square')
        plt.title('q(z|x={})'.format(y[i]))
        plt.xlim([xmin, xmax])
        plt.ylim([xmin, xmax])
        plt.xticks([])
        plt.yticks([])
        plt.subplot(2, 5, 5 + i + 1)
        plt.contour(xrange,
                    xrange,
                    np.exp(logprior + llh[i]).reshape(300, 300).T,
                    cmap='Greens')
        plt.axis('square')
        plt.title('p(z|x={})'.format(y[i]))
        plt.xlim([xmin, xmax])
        plt.ylim([xmin, xmax])
        plt.xticks([])
        plt.yticks([])
    plt.savefig('JCADVgudlog Final Fig')
Example #48
0
def pathDerivativeDist():
    space = 1
    af = 4

    outDir = "../results/longStats_space%s_ipv%s/" % (space, af)
    dataDirectory = "/data/routeviews/archive.routeviews.org/route-views.linx/bgpdata/"
    filename = dataDirectory+"2016.06/UPDATES/updates.20160601.0000.bz2"

    centralityFile = outDir+"/%s%02d%02d_af%s.pickle" % (2016, 6, 15, af)
    asAggProb, asProb = pickle.load(open(centralityFile,"rb"))

    allDiff = []
    linDiff = []
    if filename.startswith("@bgpstream:"):
        p1 = Popen(["bgpreader", "-m", "-w", filename.rpartition(":")[2], "-p", "routeviews", "-c", "route-views.linx", "-t", "updates"], stdout=PIPE)
    else:
        p1 = Popen(["bgpdump", "-m", "-v", filename],  stdout=PIPE, bufsize=-1)
    
    for line in p1.stdout:
        res = line[:-1].split('|',15)

        if res[5] == "0.0.0.0/0":
            continue
        
        if af != 0:
            if af == 4 and ":" in res[5]:
                continue
            elif af == 6 and "." in res[5]:
                continue

        if res[2] == "W":
            continue
        else:
            zTd, zDt, zS, zOrig, zAS, zPfx, sPath, zPro, zOr, z0, z1, z2, z3, z4, z5 = res

            path = list(unique_justseen(sPath.split(" ")))

            try :
                # hegeAll = map(lambda x: round(asAggProb[x],3), path[1:-1])
                hegeAll = map(lambda x: asAggProb[x], path)
                hegeDiff = np.diff(hegeAll)
                allDiff.extend(list(hegeDiff))
                
                N = 11
                dim = np.linspace(0,1,N)
                interpData = np.interp(dim, np.linspace(0,1,len(hegeAll)), hegeAll)
                linDiff.append(interpData)
            except Exception:
                # print path
                # print "New AS"
                continue

    plt.figure()
    ecdf(allDiff)
    plt.xlim([-0.1, 0.1])
    plt.xlabel("Hegemony derivative")
    plt.ylabel("CDF")
    plt.tight_layout()
    plt.savefig("../results/pathDerivative/derivativeDist.pdf")

    lda = np.array(linDiff)
    plt.figure()
    # plt.plot(dim, lda.mean(axis=0),"o-")
    plt.errorbar(dim, lda.mean(axis=0), lda.std(axis=0)/np.sqrt(N), fmt="o-")
    plt.xlabel("Relative position in the path")
    plt.ylabel("AS hegemony")
    plt.xlim([-0.03, 1.03])
    plt.tight_layout()
    plt.savefig("../results/pathDerivative/meanHegemony.pdf")


    return lda
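The linDiff block above maps hegemony profiles of variable-length AS paths onto N = 11 evenly spaced relative positions so they can be averaged; this is plain linear interpolation with np.interp. A minimal sketch with a made-up profile:

import numpy as np

hege_all = [0.9, 0.4, 0.05, 0.6]   # hypothetical hegemony values along a 4-hop path

N = 11
dim = np.linspace(0, 1, N)         # relative position in the path: 0 = origin, 1 = destination
interp = np.interp(dim, np.linspace(0, 1, len(hege_all)), hege_all)

print(interp.shape)                # (11,): paths of any length become comparable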
Example #49
0
    def plot_options_greedy(self, sess, coord, saver):
        eigenvectors_path = os.path.join(
            os.path.join(self.config.stage_logdir, "models"),
            "eigenvectors.npy")
        eigenvalues_path = os.path.join(
            os.path.join(self.config.stage_logdir, "models"),
            "eigenvalues.npy")
        eigenvectors = np.load(eigenvectors_path)
        eigenvalues = np.load(eigenvalues_path)
        for k in ["poz", "neg"]:
            for option in range(len(eigenvalues)):
                # eigenvalue = eigenvalues[option]
                eigenvector = eigenvectors[
                    option] if k == "poz" else -eigenvectors[option]
                prefix = str(option) + '_' + k + "_"

                plt.clf()

                with sess.as_default(), sess.graph.as_default():
                    for idx in range(self.nb_states):
                        dx = 0
                        dy = 0
                        d = False
                        s, i, j = self.env.get_state(idx)

                        if not self.env.not_wall(i, j):
                            plt.gca().add_patch(
                                patches.Rectangle(
                                    (j, self.config.input_size[0] - i -
                                     1),  # (x,y)
                                    1.0,  # width
                                    1.0,  # height
                                    facecolor="gray"))
                            continue
                        # Image.fromarray(np.asarray(scipy.misc.imresize(s, [512, 512], interp='nearest'), np.uint8)).show()
                        feed_dict = {self.orig_net.observation: np.stack([s])}
                        fi = sess.run(self.orig_net.fi, feed_dict=feed_dict)[0]

                        transitions = []
                        terminations = []
                        for a in range(self.action_size):
                            s1, r, d, _ = self.env.fake_step(a)
                            feed_dict = {
                                self.orig_net.observation: np.stack([s1])
                            }
                            fi1 = sess.run(self.orig_net.fi,
                                           feed_dict=feed_dict)[0]
                            transitions.append(
                                self.cosine_similarity((fi1 - fi),
                                                       eigenvector))
                            terminations.append(d)

                        transitions.append(
                            self.cosine_similarity(np.zeros_like(fi),
                                                   eigenvector))
                        terminations.append(True)

                        a = np.argmax(transitions)
                        # if a == 4:
                        #   d = True

                        if a == 0:  # up
                            dy = 0.35
                        elif a == 1:  # right
                            dx = 0.35
                        elif a == 2:  # down
                            dy = -0.35
                        elif a == 3:  # left
                            dx = -0.35

                        if terminations[a] or np.all(
                                transitions[a] == np.zeros_like(
                                    fi)):  # termination
                            circle = plt.Circle(
                                (j + 0.5,
                                 self.config.input_size[0] - i + 0.5 - 1),
                                0.025,
                                color='k')
                            plt.gca().add_artist(circle)
                            continue

                        plt.arrow(j + 0.5,
                                  self.config.input_size[0] - i + 0.5 - 1,
                                  dx,
                                  dy,
                                  head_width=0.05,
                                  head_length=0.05,
                                  fc='k',
                                  ec='k')

                    plt.xlim([0, self.config.input_size[1]])
                    plt.ylim([0, self.config.input_size[0]])

                    for i in range(self.config.input_size[1]):
                        plt.axvline(i, color='k', linestyle=':')
                    plt.axvline(self.config.input_size[1],
                                color='k',
                                linestyle=':')

                    for j in range(self.config.input_size[0]):
                        plt.axhline(j, color='k', linestyle=':')
                    plt.axhline(self.config.input_size[0],
                                color='k',
                                linestyle=':')

                    plt.savefig(
                        os.path.join(
                            self.summary_path,
                            "SuccessorFeatures_" + prefix + 'policy.png'))
                    plt.close()
Example #50
0
# Create segment CD.
C = ga.Point2D(5, 1, name='C')
D = ga.Point2D(5, 4, name='D')
CD = ga.Segment2D(C, D)

# Plot the points and the segments.
A.plot()
B.plot()
C.plot(offset=(0.2, 0))
D.plot(offset=(0.2, 0))
AB.plot()
CD.plot()

# Compute first intersection point.
X, _, coords = AB.intersect_segment(CD, return_coords=True)
X.name = 'X'
X.plot(color='red', offset=(0.2, 0.2))

print('Intersection point is on AB at parametric coordinate: {}'.format(
    coords[0]))

print('Intersection point is on CD at parametric coordinate: {}'.format(
    coords[1]))

# Adjust the plot.
plt.axis('scaled')
plt.xlim(1, 7)
plt.ylim(0, 5)
plt.grid()
plt.show()
Example #51
0
    def plot_rssi(self, port):
        self.count = 0
        self.dt = 0.5
        self.tlen = 120
        # Generate mesh for plotting
        Y, X = np.mgrid[slice(0 - .5, 26 + 1.5, 1),
                        slice(0 - self.dt / 2, self.tlen + 1 -
                              self.dt / 2, self.dt)]
        Z = np.zeros_like(X)
        # X and Y are bounds, so Z should be the value *inside* those bounds.
        # Therefore, remove the last value from the Z array.
        Z = Z[:, :-1]
        logging.debug("Creating figure")
        fig = plt.figure()
        pcm = plt.pcolormesh(X,
                             Y,
                             Z,
                             vmin=-128,
                             vmax=0,
                             cmap=plt.cm.get_cmap('jet'))
        plt.xlim([0, self.tlen])
        plt.ylim([0, 26])
        plt.colorbar(label="Measured signal level [dB]")
        plt.ylabel("Channel number")
        plt.xlabel("Time [s]")
        plt.ion()
        logging.debug("Show plot window")
        plt.show()
        ch_min = 26
        ch_max = 0
        last_update = time.time()

        logging.info("Begin collecting data from serial port")
        while True:
            line = port.readline().rstrip()

            pkt_data = re.match(
                r"\[([-+]?\d+),\s*([-+]?\d+),\s*([-+]?\d+)\]\s*(.*)",
                line.decode(errors='replace'))
            if pkt_data:
                now = time.time()
                try:
                    iface_id = int(pkt_data.group(1))
                    timestamp = int(pkt_data.group(2))
                    count = int(pkt_data.group(3))
                    tidx = int(timestamp / (self.dt * 1000000)) % (Z.shape[1])
                except ValueError:
                    continue
                logging.debug("data: tidx=%d if=%d t=%d", tidx, iface_id,
                              timestamp)
                raw = pkt_data.group(4)
                resize = False
                for ch_ed in raw.split(","):
                    try:
                        pair = ch_ed.split(":")
                        ch = int(pair[0])
                        ed = float(pair[1])
                        if ch < ch_min:
                            ch_min = ch
                            resize = True
                        if ch > ch_max:
                            ch_max = ch
                            resize = True
                        Z[ch, tidx] = ed
                    except (ValueError, IndexError):
                        continue
                    #print("ch: %d ed: %d" % (ch, ed))
                if resize:
                    logging.debug("resize: %d %d" % (ch_min, ch_max))
                    plt.ylim([ch_min - .5, ch_max + .5])
                if now > last_update + 1:
                    last_update = now
                    #pcm = plt.pcolormesh(X, Y, Z)
                    pcm.set_array(Z.ravel())
                    pcm.autoscale()
                    pcm.changed()
            plt.pause(0.01)
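Two details of plot_rssi are easy to miss: pcolormesh is given cell edges, so the value array Z has one fewer entry along each axis, and new samples are pushed into the existing QuadMesh with pcm.set_array instead of redrawing the whole plot. A minimal sketch of the edges/values relationship with random data:

import numpy as np
import matplotlib.pyplot as plt

x_edges = np.linspace(0, 120, 241)   # 241 edges -> 240 cells along the time axis
y_edges = np.arange(-0.5, 27.5, 1)   # 28 edges  -> 27 channel rows
Z = np.random.uniform(-128, 0, size=(len(y_edges) - 1, len(x_edges) - 1))

plt.pcolormesh(x_edges, y_edges, Z, vmin=-128, vmax=0)
plt.colorbar(label='signal level [dB]')
plt.xlabel('Time [s]')
plt.ylabel('Channel number')
plt.show()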
Example #52
0
    return lambda t: d*t + y


if __name__ == '__main__':
    x0 = np.arange(-2, 2.5, 0.25)
    x1 = np.arange(-2, 2.5, 0.25)
    X, Y = np.meshgrid(x0, x1)
    
    X = X.flatten()
    Y = Y.flatten()

    grad = numerical_gradient(function_2, np.array([X, Y]).T).T

    plt.figure()
    plt.quiver(X, Y, -grad[0], -grad[1],  angles="xy",color="#666666")
    plt.xlim([-2, 2])
    plt.ylim([-2, 2])
    plt.xlabel('x0')
    plt.ylabel('x1')
    plt.grid()
    plt.draw()
    plt.show()
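function_2 and numerical_gradient are defined elsewhere in the source this fragment was taken from; below is a plausible minimal version, assuming the usual central-difference gradient of f(x0, x1) = x0**2 + x1**2 (the quiver plot then shows -grad, i.e. the descent direction).

import numpy as np

def function_2(x):
    # assumed objective: f(x0, x1) = x0**2 + x1**2 for a single point x = (x0, x1)
    return np.sum(x**2)

def numerical_gradient(f, X, h=1e-4):
    # central-difference gradient, evaluated row by row for a batch of points
    X = X.astype(float)
    grad = np.zeros_like(X)
    for r, x in enumerate(X):
        for i in range(x.size):
            orig = x[i]
            x[i] = orig + h
            fxh1 = f(x)
            x[i] = orig - h
            fxh2 = f(x)
            grad[r, i] = (fxh1 - fxh2) / (2 * h)
            x[i] = orig                     # restore the coordinate
    return grad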


## Sequence Model

![](../images/lstm-many2one.png)

![](../images/lstm-many2many.png)

- The following graph is taken from [Christopher Olah's blog post: Understanding LSTM Networks](http://colah.github.io/posts/2015-08-Understanding-LSTMs/)
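As a rough illustration of the two diagrams above (this code is not from the original notebook; layer sizes and names are arbitrary assumptions), the many-to-one and many-to-many variants differ only in whether the LSTM returns its full output sequence:

import tensorflow as tf

timesteps, features, n_classes = 30, 8, 5

# Many-to-one: only the last hidden state feeds the classifier.
many_to_one = tf.keras.Sequential([
    tf.keras.Input(shape=(timesteps, features)),
    tf.keras.layers.LSTM(64),                         # return_sequences=False (default)
    tf.keras.layers.Dense(n_classes, activation='softmax'),
])

# Many-to-many: the full sequence of hidden states is kept and a Dense
# layer is applied independently at every time step.
many_to_many = tf.keras.Sequential([
    tf.keras.Input(shape=(timesteps, features)),
    tf.keras.layers.LSTM(64, return_sequences=True),
    tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(n_classes, activation='softmax')),
])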
Example #53
0
    numbers = min_ind / 2.
    return numbers, min_val


high_num, err_h = get_best_match(higher_img, numbers_base)
low_num, err_l = get_best_match(lower_img, numbers_base)

video_num = np.floor(high_num) * 10 + low_num
video_num[25::25] = np.nan

#%%
plt.figure()
lab_t, = plt.plot(np.diff(stamp), label='timestamp')

dd = np.diff(video_num)
dd[dd < -50] = np.nan
lab_v, = plt.plot(dd * 20, label='video number')

plt.xlim((0, 12000))
plt.ylim((0, 550))
plt.legend(handles=[lab_t, lab_v])
plt.xlabel('frame number')
plt.ylabel('time difference (ms)')

plt.xlim((1800, 2000))
#%%
#plt.figure()
#plt.imshow(lower_img[1328]-numbers_base[-1])
#plt.figure()
#plt.imshow(higher_img[1328]-numbers_base[1])
Example #54
0
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json
from wpdy.dvr import ExpDVR

tr = np.transpose

j = json.load(open("out/params.json"))
dvr = ExpDVR(j["dvr_n"], j["dvr_x0"], j["dvr_xN"])
mass = j["mass"]
nstate = j["nstate"]
xs = np.array(pd.read_csv("out/xs.csv")["val"])
nx = len(xs)
ws = pd.read_csv("out/ws.csv")["val"]
ts = pd.read_csv("out/ts.csv")["val"]

its = [0,20,40,60,80]
for it in its:
    df = pd.read_csv("out/{0}/coef.csv".format(it))
    tmp  = np.array(df["re"] + 1j * df["im"])
    coef = tmp.reshape((len(xs), nstate))
    
    psi1 = coef[:,0] / np.sqrt(ws)
    psi2 = coef[:,1] / np.sqrt(ws)

    # pl, = plt.plot(xs, abs(psi1)**2, label="state 1")
    pl, = plt.plot(xs, psi1.real, label="state 1")
    plt.plot(      xs, psi2.real, color=pl.get_color(), linestyle="--", label="state 2")
plt.xlim(-11,11)
plt.savefig("psi.pdf")


Example #55
0
    def cov_plot(self, matrix, station="", hour="", date="", averaged=""):
        """ Basic plot for the correlation matrix """
        var = self.var_dics[self.var]['name']
        fig, ax = plt.subplots()
        date = self.date_prettyfier(date)
        hour = str(hour).replace('0', '00:00').replace('1', '12:00')
        if not averaged:
            title = "Stat: " + station + ', H: ' + hour + ', Date: ' + date + ', ' + var
            filename = 'Cov_' + station + '_hour_' + hour.replace(
                ':', '') + '_date_' + str(date).replace('/', '') + '_' + var

        elif averaged:
            title = var.replace(
                'temp', 'Temp.') + " , Stat: " + station + ', H: ' + str(
                    hour) + ', Date: ' + str(date)
            filename = 'Cov_' + station + '_hour_' + str(hour).replace(
                ':', '') + '_averaged_' + str(date).replace('/',
                                                            '') + '_' + var

        plt.title(title.replace('_', ' '), y=1.03, fontsize=self.font - 2)

        num = len(matrix[0, :])
        Num = range(num)

        vmin, vmax = -3, 3
        if self.var == 'direction':
            vmin, vmax = -10, 10
        color_map = plt.imshow(
            matrix,
            interpolation='nearest',
            cmap='RdYlBu',
            vmin=vmin,
            vmax=vmax
        )  # nearest serves for discreete grid  # cmaps blue, seismic
        plt.ylim(-0.5, 15.5)
        plt.xlim(-0.5, 15.5)
        plt.xticks(Num, Num)
        plt.xlabel('Pressure level an_dep [hPa]', fontsize=self.font - 2)
        plt.yticks(Num, Num)
        plt.ylabel('Pressure level fg_dep [hPa]', fontsize=self.font - 2)
        ax.set_xticklabels(labels=self.pretty_pressure,
                           fontsize=self.font - 4,
                           rotation=45)
        ax.set_yticklabels(labels=self.pretty_pressure, fontsize=self.font - 4)

        bar = plt.colorbar()
        bar.ax.set_ylabel("Covariance", fontsize=self.font)

        for i in Num:  # creating text labels
            for j in Num:
                value = '{0:.2f}'.format(matrix[i, j])
                text = ax.text(j,
                               i,
                               value,
                               ha='center',
                               va='center',
                               color='black',
                               fontsize=5)

        if not os.path.isdir('plots/covariances/' + station):
            os.mkdir('plots/covariances/' + station)
        plt.savefig('plots/covariances/' + station + '/' + filename + '.png',
                    bbox_inches='tight',
                    dpi=200)
        plt.close()
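The per-cell value labels in cov_plot come from looping over the matrix and calling ax.text at each (column, row) position on top of imshow. The pattern on its own, with a random matrix standing in for the covariance data:

import numpy as np
import matplotlib.pyplot as plt

m = np.random.uniform(-3, 3, size=(6, 6))

fig, ax = plt.subplots()
im = ax.imshow(m, interpolation='nearest', cmap='RdYlBu', vmin=-3, vmax=3)
fig.colorbar(im, ax=ax, label='Covariance')

for i in range(m.shape[0]):            # row index -> y position
    for j in range(m.shape[1]):        # column index -> x position
        ax.text(j, i, '{0:.2f}'.format(m[i, j]), ha='center', va='center', fontsize=7)

plt.show()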
Example #56
0
plt.rc('font',
       **{
           'family': 'sans-serif',
           'sans-serif': ['Helvetica']
       })
plt.rc('xtick.major', pad=10)
plt.rc('xtick.minor', pad=10)
plt.rc('ytick.major', pad=10)
plt.rc('ytick.minor', pad=10)

#rfull,xifull = np.loadtxt('../s2_out.dat',usecols=(0,1),unpack=True)

plt.plot(lr,
         np.log10(np.abs(xir)),
         linewidth=1.5,
         c='b',
         label=r'$\sigma_2^2(r)$ Gaussian (spline)')
plt.plot(lr,
         np.log10(np.abs(xir2)),
         linewidth=1.5,
         c='m',
         label=r'$\sigma_2^2(r)$ TH (Romberg)')
plt.ylim(-10.0, 8.0)
plt.xlim(-0.99, 2.5)
#plt.xlim(8.0,16.0);
plt.xlabel(r'\textbf{$\log_{10}(R/\rm Mpc)$}', labelpad=5)
plt.ylabel(r'$\log_{10}(\sigma^2_2(R))$', labelpad=10)
plt.title('power spectrum moment', fontsize=16)
plt.legend(loc='upper right')

plt.show()
Example #57
0
io.quit()

# Calculate measures of interest from collected data
txtimes = results[:, 0]
expe2edelay = results[:, 1]
rxtimes = results[:, 2]

e2edelays = rxtimes - txtimes
msgintervals = txtimes[1:] - txtimes[:-1]

mintime = min(txtimes.min(), rxtimes.min())
maxtime = max(txtimes.max(), rxtimes.max())

statstr = "Min: %.3f, Max: %.3f, Mean: %.3f, Std: %.3f"
rtdstats = statstr % (e2edelays.min(), e2edelays.max(), e2edelays.mean(),
                      e2edelays.std())
expstats = statstr % (expe2edelay.min(), expe2edelay.max(), expe2edelay.mean(),
                      expe2edelay.std())

# Plot the results.
from matplotlib import pylab as pl
pl.xlim(mintime - 1.0, maxtime + 1.0)
pl.plot(txtimes, e2edelays, label="ioHub")
pl.plot(txtimes, expe2edelay, label="Experiment")
pl.xlabel("Time (msec)")
pl.ylabel("Round Trip Delay (msec)")
pl.legend()
pl.title("ioHub-ioSync Delay:: %s\nPsychoPy-ioSync Delay:: %s" %
         (rtdstats, expstats))
pl.show()
Example #58
0
    def outliers_example(self,
                         corr='',
                         out='',
                         date='',
                         N='',
                         lower='',
                         upper='',
                         median='',
                         flag='',
                         upper_s='',
                         lower_s='',
                         station='',
                         what=''):

        pressure = self.pretty_pressure_dic[str(self.an_p)]
        var = self.var_dics[self.var]['name']
        hour = str(self.hour).replace('0', '00:00').replace('1', '12:00')
        plt.title(var + ' ' + what + ' Outliers - Stat: ' + station + ', H: ' +
                  hour + ', P: ' + pressure + ' [hPa]',
                  y=1.03)

        corr_ = [n for n in corr if not np.isnan(n)]
        out_ = [n for n in out if not np.isnan(n)]

        num_a = '{:.1f}'.format(len(corr_) / len(out_ + corr_) * 100)
        num_o = '{:.1f}'.format(len(out_) / len(out_ + corr_) * 100)

        plt.scatter(date,
                    corr,
                    label='Accepted [' + num_a + '%]',
                    color='cyan',
                    s=3)
        plt.scatter(date,
                    out,
                    label='Outliers [' + num_o + '%]',
                    color='black',
                    s=3)
        X = [min(date), max(date)]

        plt.plot(X, [lower, lower], label='Lower', color='blue', ls='--')
        plt.plot(X, [upper, upper], label='Upper', color='red', ls='--')

        # adding the upper and lower values for skewed distributions
        plt.plot(X, [lower_s, lower_s],
                 label='Lower Skewed',
                 color='blue',
                 ls='-')
        plt.plot(X, [upper_s, upper_s],
                 label='Upper Skewed',
                 color='red',
                 ls='-')

        plt.plot(X, [median, median],
                 label='Median [' + '{:.1f}'.format(median) + ']',
                 color='black',
                 ls='--')

        plt.legend(fontsize=self.font - 6, loc='upper right', ncol=2)
        plt.grid(linestyle=':', color='lightgray', lw=1.2)

        plt.ylabel('Departure ' + self.var_dics[self.var]['units'],
                   fontsize=self.font)

        plt.xlabel('Date', fontsize=self.font)
        plt.xticks(rotation=45)

        out_c = [n for n in out if not np.isnan(n)]
        corr_c = [n for n in corr if not np.isnan(n)]

        plt.xlim(min(date) - 1 / 365, max(date) + 1 / 365)

        plt.ylim(-10, 10)

        plt.savefig('plots/outliers/outliers_' + flag + '_' + str(N) +
                    '_date_' + str(min(date)) + '_hour_' + self.hour + '_' +
                    self.var + '_anp_' + str(self.an_p) + '_fgp_' +
                    str(self.fg_p) + '.png',
                    bbox_inches='tight')
        plt.close()
Example #59
0
    # Save the frequencies and smoothed spectral heat currents to text files
    np.savetxt('frequencies_and_ITC.txt', np.column_stack(
        (x_Frequency, y_ITC)))
    np.savetxt('frequencies_accumulated_ITC.txt',
               np.column_stack((x_Frequency, accumulated_ITC)))

    # *************************** Plotting ************************* (It can be reduced to a graph function)
    # Phonon transmission T(w)
    plt.plot(x_Frequency,
             T_w,
             '-',
             linewidth=3,
             label="Interfacial Phonon Transmission")
    plt.xlabel('w/(2*pi) (THz)')
    plt.ylabel('Transmission')
    plt.xlim(0, max(x_Frequency))  # Frequency range
    plt.ylim(0, max(T_w) + 5)  # It depends on your case
    plt.legend(fontsize=15, loc='best')
    # Save before show(); otherwise the current figure may already be closed and an empty file written
    plt.savefig(fileprefix + '_Tw.eps')
    plt.show()

    # Spectral thermal conductance (start a new figure so it does not draw over the transmission plot)
    plt.figure()
    plt.plot(x_Frequency,
             y_ITC,
             '-',
             linewidth=3,
             label="Spectral thermal conductance")
    plt.xlabel('w/(2*pi) (THz)')
    plt.ylabel('G(w) (GW/m^2/K/THz)')
    plt.xlim(0, max(x_Frequency))  # Frequency range
    plt.ylim(0, max(y_ITC) + (max(y_ITC) / 2))  # It depends on your case
Example #60
0
import random

import numpy as np
import matplotlib.pylab as pl

nrun = 100000  # number of iterations; assumed value, not defined in the original fragment

xx = np.zeros(nrun)
yy = np.zeros(nrun)

xx[0] = 0.5
yy[0] = 0.0

for irun in range(1, nrun):
    if np.mod(irun, 10000) == 0:
        print(irun)

    rand = random.random()
    if rand < 0.02:
        xx[irun] = 0.5
        yy[irun] = 0.27 * yy[irun - 1]
    elif rand >= 0.02 and rand < 0.17:
        xx[irun] = -0.139 * xx[irun - 1] + 0.263 * yy[irun - 1] + 0.57
        yy[irun] = 0.246 * xx[irun - 1] + 0.224 * yy[irun - 1] - 0.036
    elif rand >= 0.17 and rand < 0.3:
        xx[irun] = 0.17 * xx[irun - 1] - 0.215 * yy[irun - 1] + 0.408
        yy[irun] = 0.222 * xx[irun - 1] + 0.176 * yy[irun - 1] - 0.0893
    else:
        xx[irun] = 0.781 * xx[irun - 1] + 0.034 * yy[irun - 1] + 0.1075
        yy[irun] = -0.032 * xx[irun - 1] + 0.739 * yy[irun - 1] + 0.27

pl.plot(xx, yy, 's', markersize=1)
pl.xlim(0, 1)
pl.ylim(0, 1)
pl.xlabel(r'X', fontsize=20)
pl.ylabel(r'Y', fontsize=20)
pl.show()