Code Example #1
File: plotter.py Project: OliStein/minions
 def shist(self,data,sflag,spath,sname,pflag):
     g.tprinter('Running shist', pflag)
     xdata=data[:,0]
     ydata=data[:,1]
     plt.plot(xdata,ydata,'ro')
     plt.savefig(os.path.join(spath,str(sname))+'.pdf')
     plt.close()
Code Example #2
def genCurve(dataSet, tree):
	x = [] # stores the x axis of the graph
	trainList = [] # the list of accuracies derived from training data
	valList = [] # the list of accuracies derived from validation data
	i = 0
	while i < 1: 
		i = i+0.1
		a = 0
		b = 0
		for trial in range(3):
			newData = sortData(dataSet, i) # MAKE THIS
			tree = getTree(newData) # NEED TO GET THIS FUNCTION WHEN TREEGEN WORKS
			a = a + model_validation.validateTree(tree, newData)
			b = b + model_validation.validateTree(tree, newData)  # NOTE: identical to the line above; b was presumably meant to use held-out validation data
		a = float(a)/3
		b = float(b)/3

		trainList.append(a)
		valList.append(b)
		x.append(i)

	plt.plot(x, trainList)
	plt.plot(x, valList)
	plt.xlabel('percent training used')
	plt.ylabel('percent accuracy')
	plt.title('learning curve')
	plt.show()
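The helpers sortData and getTree are explicitly stubbed in the snippet above ("MAKE THIS"); below is a hypothetical reading of sortData's contract, inferred only from the loop (take a fraction i of the data), offered as a sketch rather than the project's actual implementation:

import random

def sortData(dataSet, fraction):
    # Hypothetical stand-in: return a random subset holding `fraction`
    # of the examples, which is what genCurve's loop appears to assume.
    k = max(1, int(len(dataSet) * fraction))
    return random.sample(list(dataSet), k)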
Code Example #3
def plotRaster(clustaArray=[]):

    if len(clustaArray) < 1:
        print("Nothing to plot!")
    else:
        # create list of times that maps to each spike
        p_sptimes = []
        for a in clustaArray:
            for b in a.spike_samples:
                p_sptimes.append(b)
        sptimes = np.array(p_sptimes)

        p_clusters = []
        for c in clustaArray:
            for d in c.id_of_spike:
                p_clusters.append(c.id_of_clusta)
        clusters = np.array(p_clusters)

        # dynamically generate cluster list
        clusterList = []
        for a in clustaArray:
            clusterList.append(a.id_of_clusta)
        # plot raster for all clusters

        # nclusters = 20

        # #for n in range(nclusters):
        timesList = []
        for n in clusterList:
            # if n<>9:
            ctimes = sptimes[clusters == n]
            timesList.append(ctimes)
            plt.plot(ctimes, np.ones(len(ctimes)) * n, "|")
        plt.show()
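plotRaster only assumes each element exposes spike_samples, id_of_spike, and id_of_clusta; here is a minimal runnable sketch with a hypothetical Clusta stand-in (the real class comes from the original project and is not shown in this listing):

import numpy as np
import matplotlib.pyplot as plt

class Clusta:
    # Hypothetical stand-in mirroring the attributes plotRaster reads.
    def __init__(self, clusta_id, spike_samples):
        self.id_of_clusta = clusta_id
        self.spike_samples = list(spike_samples)
        self.id_of_spike = list(range(len(self.spike_samples)))

demo = [Clusta(n, np.sort(np.random.randint(0, 30000, size=50))) for n in range(3)]
plotRaster(demo)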
Code Example #4
File: sk.py Project: jdestupinan1332/TareasComp
def plotsig (ReconSig, electrode):
    """
    plots the reconstructed signal and the original
    
    in: ReconSig, the reconstructed signal
        electrode, the original signal
    """
    plt.plot(ReconSig)
    plt.plot(electrode)
    plt.show()
Code Example #5
File: Rbfc.py Project: b14ckfir3/Rbfc
    def show_data_set_plot(self):

        data_plots = []
        for i in range(self.data_set.shape[1]):
            data_plots.append(go.Scatter(x=list(range(self.data_set.shape[0])), y=self.data_set[:, i], mode="markers", name="Characteristic "+str(i+1)))

        plot(data_plots, filename="Dataset.html")

        # Add delay between plots showing to avoid crashing of browser
        time.sleep(1.5)
Code Example #6
File: pandasCore.py Project: dsmiff/pandasUtils
 def plotDataFrame(self, variables):
     try:
         import matplotlib.pyplot as plt
     except ImportError:
         print("Unable to import matplotlib")
         return  # bail out rather than hit a NameError on plt below
     plt.plot(self.df[variables[0]], self.df[variables[1]])
     plt.xlabel(r"{}".format(variables[0]))
     plt.ylabel(r"$P$")
     plt.minorticks_on()
     plt.show()
Code Example #7
File: lag_hydro.py Project: jhansel/radhydro
def plot2D(x,y,x_ex,y_ex,ylabl):

    #static variable counter
    plot2D.fig_num += 1

    plt.subplot(2,2,plot2D.fig_num)
    plt.xlabel('$x$ (cm)')
    plt.ylabel(ylabl)
    plt.plot(x,y,"b+-",label="Lagrangian")
    plt.plot(x_ex,y_ex,"r--",label="Exact")
    plt.savefig("var_"+str(plot2D.fig_num)+".pdf")
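plot2D stores its subplot counter as a function attribute, so plot2D.fig_num must be initialized once before the first call; a minimal sketch with synthetic data (the initialization is the one thing the snippet leaves implicit):

import numpy as np
import matplotlib.pyplot as plt

plot2D.fig_num = 0                      # the "static" counter the function increments
x = np.linspace(0.0, 1.0, 20)
plot2D(x, x**2, x, x**2, '$u$ (cm/s)')  # fills panel 1 of the 2x2 grid, saves var_1.pdf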
Code Example #8
File: sift.py Project: maxschommer/Multirotors
def draw_circle(c,r):
	t = arange(0,1.01,.01)*2*pi
	x = r*cos(t) + c[0]
	y = r*sin(t) + c[1]
	plt.plot(x,y,'b',linewidth=2)
	plt.imshow(im)
	if circle:
		for p in locs:
			draw_circle(p[:2], p[2])  # recursive call; pyplot has no draw_circle
	else:
		plt.plot(locs[:,0],locs[:,1],'ob')
	plt.axis('off')
Code Example #9
File: muscl_hanc.py Project: jhansel/radhydro
def plot2D(x,y,ylabl,x_ex=None,y_ex=None):

    #static variable counter
    plot2D.fig_num += 1

    plt.subplot(2,2,plot2D.fig_num)
    plt.xlabel('$x$ (cm)')
    plt.ylabel(ylabl)
    plt.plot(x,y,"b+-",label="Numerical")
    if x_ex is not None:
        plt.plot(x_ex,y_ex,"r-x",label="Exact")

    plt.savefig("var_"+str(plot2D.fig_num)+".pdf")
Code Example #10
File: visualize.py Project: AhmedKamalAK/py_ml_utils
def viz_losses(filename, losses):
  if '.' not in filename: filename += '.png'

  x = history['epoch']  # 'history' is assumed to be available from the enclosing scope
  legend = losses.keys()

  for v in losses.values(): plt.plot(np.arange(len(v)) + 1, v, marker='.')

  plt.title('Loss over epochs')
  plt.xlabel('Epochs')
  plt.xticks(history['epoch'], history['epoch'])
  plt.legend(legend, loc = 'upper right')
  plt.savefig(filename)
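The function above reads a module-level history that the snippet never defines; here is a minimal sketch of the inputs it appears to expect, with history supplied as a hypothetical global (all names and values invented):

import numpy as np
import matplotlib.pyplot as plt

history = {'epoch': [1, 2, 3, 4, 5]}            # assumed global consumed by viz_losses
losses = {'train': [0.9, 0.6, 0.4, 0.3, 0.25],
          'val': [1.0, 0.7, 0.55, 0.5, 0.48]}
viz_losses('losses_demo', losses)               # writes losses_demo.png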
Code Example #11
def get_trajectories_for_DF(DF):
    GetXYS=ct.get_xys(DF)
    xys_s=ct.get_xys_s(GetXYS['xys'],GetXYS['Nmin'])
    plt.figure(figsize=(5, 5),frameon=False)
    for m in list(range(9)):
        plt.plot()
        plt.subplot(3,3,m+1)
        xys_s_x_n=xys_s[m]['X']-min(xys_s[m]['X'])
        xys_s_y_n=xys_s[m]['Y']-min(xys_s[m]['Y'])
        plt.plot(xys_s_x_n,xys_s_y_n)
        plt.axis('off')
        axes = plt.gca()
        axes.set_ylim([0,125])
        axes.set_xlim([0,125])
Code Example #12
def plotHistogram(clustaArray=[]):
    if len(clustaArray) < 1:
        print("Nothing to plot!")
    else:
        # create list of times that maps to each spike
        p_sptimes = []
        for a in clustaArray:
            for b in a.spike_samples:
                p_sptimes.append(b)
        sptimes = np.array(p_sptimes)

        p_clusters = []
        for c in clustaArray:
            for d in c.id_of_spike:
                p_clusters.append(c.id_of_clusta)
        clusters = np.array(p_clusters)

        # dynamically generate cluster list
        clusterList = []
        for a in clustaArray:
            clusterList.append(a.id_of_clusta)

        # plot raster for all clusters
        # nclusters = 20

        # #for n in range(nclusters):
        timesList = []
        for n in clusterList:
            # if n<>9:
            ctimes = sptimes[clusters == n]
            timesList.append(ctimes)
            # plt.plot(ctimes, np.ones(len(ctimes))*n, '|')
        # plt.show()

        # plot frequency in Hz over time
        dt = 1 / 30000.0  # in seconds
        binSize = 1  # in seconds
        binSizeSamples = round(binSize / dt)
        recLen = np.max(sptimes)
        nbins = round(recLen / binSizeSamples)

        binCount = []
        cluster = 3
        for b in np.arange(0, nbins - 1):
            n = np.sum((timesList[cluster] > b * binSizeSamples) & (timesList[cluster] < (b + 1) * binSizeSamples))
            binCount.append(n / binSize)  # makes Hz

        plt.plot(binCount)
        plt.ylim([0, 20])
        plt.show()
Code Example #13
def animate_plotting(subdir_path,):
    average_filename = 'averaged_out.txt'  
    if os.path.exists( os.path.join(subdir_path,average_filename) ):
            print(subdir_path+average_filename+' already exists please use hotPlot.py')        
            #import existing data for average at the end           
#            data_out = numpy.genfromtxt(os.path.join(subdir_path,average_filename))
#            averaged_data = numpy.array(data_out[:,1])
#            angles = data_out[:,0]
            #os.remove( os.path.join(subdir_path,average_filename))
    else:
        files = os.listdir(subdir_path)     
            #files = [d for d in os.listdir(subdir_path) if os.path.isdir(os.path.join(subdir_path, d))]
        onlyfiles_path = [os.path.join(subdir_path,f) for f in files if os.path.isfile(os.path.join(subdir_path,f))]
        onlyfiles_path = natsort.natsorted(onlyfiles_path)          
        averaged_data = []
        angles = []
        for f in onlyfiles_path:
            data = numpy.genfromtxt(f,delimiter = ',')       
            #data = pandas.read_csv(f)
            averaged_data.append(numpy.mean(data))
            angle = os.path.basename(f).split('_')[0]
            angles.append(float(angle))
        fig = plt.plot(angles, averaged_data,'o')
        plt.yscale('log')
        plt.xscale('log')
        plt.legend(loc='upper right')
        plt.title(base_path)  # base_path is assumed to be a module-level global
        plt.grid(True)
        plt.xlabel(r'$\theta$ $[deg.]}$')
        #plt.xlabel(r'$\mathrm{xlabel\;with\;\LaTeX\;font}$')
        plt.ylabel(r'I($\theta$) $[a.u.]$')
Code Example #14
def plotEqDistn(r1, r2, board):
    xs = []
    ys = []
    
    handCount = 0.0
    
    for hand in r1.getHandsSortedAndEquities(r2, board):
        #plot hand at (handCount, equity) and (handCount + r1.getFrac(hand[0]), equity)
        xs.append(handCount)
        handCount += r1.getFrac(hand[0])
        xs.append(handCount)
        ys.append(hand[1])
        ys.append(hand[1])
        
    
    
    plot(xs, ys)
Code Example #15
File: Output.py Project: colemathis/BlackQueen
def plot_data():
	import matplotlib.pylab as plt
	t, suffering = np.loadtxt('suffering.dat', unpack= True, usecols = (0,1))
	t, fitness = np.loadtxt('fitness.dat', unpack = True, usecols = (0,1))


	plt.plot(t, suffering)
	plt.xlabel('t')
	plt.ylabel('suffering')
	plt.savefig('suffering.png')
	plt.close()

	plt.plot(t, fitness)
	plt.xlabel('t')
	plt.ylabel('fitness')
	plt.savefig('fitness.png')
	plt.close()
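plot_data expects two whitespace-delimited columns in suffering.dat and fitness.dat; a minimal sketch that writes synthetic files so the function can be exercised (filenames from the original, values invented):

import numpy as np

t = np.linspace(0.0, 10.0, 50)
np.savetxt('suffering.dat', np.column_stack([t, np.exp(-t)]))
np.savetxt('fitness.dat', np.column_stack([t, 1.0 - np.exp(-t)]))
plot_data()  # writes suffering.png and fitness.png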
Code Example #16
File: p1q2.py Project: masbicudo/Trabalhos-UFRJ
def grafico(e):
    plt.plot(k, np.repeat(dfa, N+1), 'k-')
    plt.plot(k, vff, 'go')
    plt.plot(k, vaf, 'ro')
    plt.plot(k, vbf, 'bo')
    
    plt.axis([0, N, dfa - e, dfa + e])
    
    plt.grid(True)
    plt.show()
Code Example #17
File: blocking.py Project: kleikanger/vmc
def blca(datafile,numprocs): 
	#run c++ blocking routine, saves txt data file with blocking data
	os.system("make --silent")
	os.system("mpirun -n %i blocking.out 100 3000 2 %i %s"%(nprocs,numprocs,datafile))  # VMC; nprocs is assumed to be a module-level global
#os.system("mpirun -n %i blocking.out 5000 100 20000 %i %s"%(nprocs,numprocs,datafile))#DMC
	#read txt file and save plot
	data = np.genfromtxt(fname=datafile+'.txt')
	fig=plt.figure()
	plt.plot(data[:,0],data[:,2],'k+')
	plt.xlabel(r'$\tau_{trial}$', size=20)
	plt.ylabel(r'$\epsilon$', size=20)
	plt.xlim(np.min(data[:,0]),np.max(data[:,0]))
	plt.ylim(np.min(data[:,2]),np.max(data[:,2]))
	fig.savefig(datafile+'.eps',format='eps')
	#open plot if -p in argv
	if plot_res:
		os.system('evince %s%s '%(datafile+'.eps','&'))
	print("plot saved : %s"%(datafile+'.eps'))
Code Example #18
def plotWaveforms(clustaArray=[]):
    if len(clustaArray) < 1:
        print("Nothing to plot!")
    else:
        clustaToPlot = int(input("Please enter the cluster id to plot: "))

        i = 0
        while i < len(clustaArray):
            if clustaToPlot == clustaArray[i].id_of_clusta:
                j = 0
                while j < len(clustaArray[i].waveforms):
                    k = 0
                    while k < len(clustaArray[i].waveforms[j]):
                        plt.plot([k], [clustaArray[i].waveforms[j][k]], "ro")
                        k = k + 1
                    j = j + 1
            i = i + 1
        plt.show()
Code Example #19
File: SFDR_plot.py Project: umass-wares/wares_spec
def plot_sinad_sfdr (label, data_x, data_y, chans=[0,1,2,3],
                     titles=['SFDR','SINAD']):
    """
    x   x values of data (same for all chans)
    y   array with shape (2, chans, data)
    """
    n=len(chans)    
    n2=len(titles)
    pos=np.arange(n2*n)+1

    for t in range(n2):
        pos_val=pos[t::n2]
        for chan in chans:
            plt.subplot(n,n2,pos_val[chan])
            plt.plot(data_x,data_y[t][chan],label=label)
            if t==0:
                plt.ylabel('Chan %i' %chan)
            if chan==0:
                plt.title(titles[t])
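Per the docstring, data_y has shape (2, chans, data); a minimal sketch exercising plot_sinad_sfdr with random values (all inputs invented for illustration):

import numpy as np
import matplotlib.pyplot as plt

data_x = np.linspace(0.0, 1.0, 64)
data_y = np.random.rand(2, 4, 64)   # (metrics, channels, samples)
plot_sinad_sfdr('demo', data_x, data_y)
plt.show()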
Code Example #20
File: plotter.py Project: simphys/exercises
 def __save(self,n,plot,sfile):
     p.figure(figsize=sfile)
     p.xlabel(plot.xlabel)
     p.ylabel(plot.ylabel)
     p.xscale(plot.xscale)
     p.yscale(plot.yscale)
     p.grid()
     for curve in plot.curves: 
         if curve[1] is None: p.plot(curve[0], curve[2], label=curve[3])
         else: p.plot(curve[0], curve[1], curve[2], label=curve[3])
     p.rc('legend', fontsize='small')
     p.legend(shadow=0, loc='best')
     p.gca().set_aspect(plot.aspect)  # p.axes() would create a new Axes; gca() targets the current one
     if not plot.dir: plot.dir = './plots/'
     if not plot.name: plot.name = self.__global_name+'_%0*i'%(2,n)
     if not os.path.isdir(plot.dir): os.mkdir(plot.dir)
     if plot.pgf: p.savefig(plot.dir+plot.name+'.pgf')
     else: p.savefig(plot.dir+plot.name+'.pdf', bbox_inches='tight')
     p.close()
Code Example #21
File: utils.py Project: toinsson/mlspeech
def print_for_test():
    f = h5py.File("lex-chocolate-1min.hdf5", "r")  # h5py 3.x requires an explicit mode
    ar = f.values()
    npar = np.array(ar)
    x1 = np.linspace(0, len(npar[0]), num=len(npar[0]))

    wavFile = wave.open("lex-chocolate-1min.wav")
    (nchannels, sampwidth, framerate, nframes, comptype, compname) = wavFile.getparams()
    frames = wavFile.readframes(-1)
    data = np.frombuffer(frames, dtype=np.int16)  # np.fromstring is deprecated for binary data
    x2 = np.linspace(0, len(data), num=len(data))

    f1 = plt.figure(1)
    plt.plot(x1*160, npar[0]*max(data), x2, data);
    f1.show()

    xA = np.linspace(0, len(A), num=len(A))  # A is assumed to be defined elsewhere in the module
    f2 = plt.figure(2)
    plt.plot(xA*160/16000, A*max(data), x2/16000, data);
    f2.show()
Code Example #22
File: Rbfc.py Project: b14ckfir3/Rbfc
    def show_membership_functions_plot(self):
        smoothing_value = 1
        for key, mem_funcs in enumerate(self._membership_functions):
            left_plot_smoothed = go.Scatter(x=list(range(len(mem_funcs.membership_function_left))),
                                            y=gf.smooth_data(mem_funcs.membership_function_left, smoothing_value), mode="lines",
                                            marker=go.Marker(color="#df80ff"), name="Left Membership function")
            right_plot_smoothed = go.Scatter(x=list(range(len(mem_funcs.membership_function_left))),
                                             y=gf.smooth_data(mem_funcs.membership_function_right, smoothing_value), mode="lines",
                                             marker=go.Marker(color="#8600b3"), name="Right Membership function")
            left_plot = go.Scatter(x=list(range(len(mem_funcs.membership_function_left))),
                                   y=mem_funcs.membership_function_left, mode="markers",
                                   marker=go.Marker(color="#df80ff"), opacity=0.3, name="Left Membership function")
            right_plot = go.Scatter(x=list(range(len(mem_funcs.membership_function_left))),
                                    y=mem_funcs.membership_function_right, mode="markers",
                                    marker=go.Marker(color="#8600b3"), opacity=0.3, name="Right Membership function")

            plot([left_plot_smoothed, right_plot_smoothed, left_plot, right_plot], filename="Characteristic {} membership function.html".format(key+1))
            # Add delay between plots showing to avoid crashing of browser
            time.sleep(1.5)
Code Example #23
File: signal001.py Project: tmarkovich/cs-comparison
def harmonic_inversion():
	import csv
	f = open('/Users/tmarkovich/Dropbox/Projects/CSComparisonPaper/signals/signal001.csv', 'rb')
	reader = csv.reader(f)
	signal = []
	for row in reader:
	    signal.append(row)
	f.close()
	signal = np.array(signal).astype(np.float)
	time = signal[:,0]
	signal = np.squeeze(signal[:,1])

	# Harmonic Inversion part of the comparison
	harminv = imp.load_source('harminv',"/Users/tmarkovich/Dropbox/Projects/cslibrary/harminv.py")

	dt = 1.0/4096
	n = len(signal)  # signal is 1-D after the squeeze above
	nf = 500
	harminv.dens = 1.4
	harminv.NF_MAX = 300
	harminv.dataCreate(n, signal, 0.0*dt, 0.1, nf)
	harminv.solve_once()
	harminv.compute_amps()
	reproduced = harminv.reproduce(time)
	plt.plot(time, np.real(reproduced) , color='b', linewidth=3)
	plt.plot(time, np.imag(reproduced) , color='r', linewidth=3)
	plt.plot(time, signal, color='g', linewidth=3)
Code Example #24
File: PCA.py Project: zhibzeng/PythonCode
def execute():
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    x = random.normal(5, .5, 1000)
    y = random.normal(3, 1, 1000)
    a = x*cos(pi/4) + y*sin(pi/4)
    b = -x*sin(pi/4) + y*cos(pi/4)
    plt.plot(a, b, '.')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('原数据集')  # "original data set"; SimHei above enables the Chinese label
    data = zeros((1000, 2))
    data[:, 0] = a
    data[:, 1] = b
    x, y, evals, evecs = pca(data, 1)
    print(y)
    plt.figure()
    plt.plot(y[:, 0], y[:, 1], '.')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('重新构造数据')  # "reconstructed data"
    plt.show()
Code Example #25
File: centroid.py Project: barentsen/dave
def exampleDiffImgCentroiding():
    k2id =  206103150
    campaign = 3

    ar = mastio.K2Archive()
    fits, hdr = ar.getLongTpf(k2id, campaign, header=True)
    hdr0 = ar.getLongTpf(k2id, campaign, ext=0)
    cube = tpf.getTargetPixelArrayFromFits(fits, hdr)
    idx = np.isfinite(cube)
    cube[~idx] = 0  #Remove Nans

    flags = fits['QUALITY']
    ccdMod = hdr0['module']
    ccdOut = hdr0['output']

    #Compute roll phase
    llc = ar.getLongCadence(k2id, campaign)
    time= llc['TIME']
    cent1 = llc['MOM_CENTR1']
    cent2 = llc['MOM_CENTR2']
    centColRow = np.vstack((cent1, cent2)).transpose()
    rot = arclen.computeArcLength(centColRow, flags>0)
    rollPhase = rot[:,0]
    rollPhase[flags>0] = -9999    #A bad value

    prfObj = prf.KeplerPrf("/home/fergal/data/keplerprf")
    bbox = getBoundingBoxForImage(cube[0], hdr)

    period = 4.1591409
    epoch = fits['time'][491]
    dur = 3.0

    out, log = measureDiffOffset(period, epoch, dur, time, prfObj, \
        ccdMod, ccdOut, cube, bbox, rollPhase, flags)

    idx = out[:,1] > 0
    mp.clf()
    mp.plot(out[:,3]-out[:,1], out[:,4]- out[:,2], 'ro')
    return out
Code Example #26
File: gmapper.py Project: SkBlaz/GeneMapper
    def display(self, data, candidates, fname, display):
        
        finallist=[]
        for c in candidates:
            finallist.append(c[0])
        #print finallist
        part1 = finallist[:len(finallist)//2]
        part2 = finallist[len(finallist)//2:]
        
        meandiff=int(np.sqrt(np.power(np.mean(part2),2)-np.power(np.mean(part1),2)))
        rangeA = max(part1)-min(part1)
        rangeB = max(part2)-min(part2)
        span = int((rangeA+rangeB)/2)
        dspan = int(meandiff/span)
        theta = float(meandiff/(rangeA+rangeB))
        oneortwo=""
        if (dspan > 3 and meandiff > 20) or meandiff > 36:  # parentheses added for readability; precedence unchanged
            oneortwo = "Two distributions \n\n MD: %d \n Span: %d \n Dspan: %d \n theta: %d" % (meandiff, span, dspan, theta) 
        else:
            oneortwo = "One distribution \n\n MD: %d \n Span: %d \n Dspan: %d \n theta: %d" % (meandiff, span, dspan, theta)

        cans = np.array(candidates)
        plt.plot(cans[:,0],cans[:,1],'ro')
        plt.axhline(max(cans[:,1])/4, color='r')
        plt.axhline(max(cans[:,1]/2), color='r')
        plt.axhline(int(max(cans[:,1]))*0.75, color='r')
        red_patch = mpatches.Patch(color='red', label='75%, 50% and 25% \nof maximum frequency')
        plt.legend(handles=[red_patch])
        plt.ylabel('Frequency of occurence')
        plt.xlabel('separate items')
        plt.title('Frequency distribution estimation graph: %s' %(fname))
        plt.text(max(data)*1.1, max(cans[:,1])*0.62, oneortwo, fontsize = 11, color = 'r')
        plt.hist(data,range(int(min(data)),int(max(data)),1))
        ofile = fname[0:-3]+"png"
        print("Writing outfile: %s" % ofile)
        plt.savefig(ofile, bbox_inches='tight')
        if display:
            plt.show()
        return;
Code Example #27
File: p1q2.py Project: masbicudo/Trabalhos-UFRJ
def graficolog():
    ax = plt.gca()
    ax.set_yscale('log')
    plt.plot(k, eff, 'go')
    plt.plot(k, eaf, 'ro')
    plt.plot(k, ebf, 'bo')
    
    plt.grid(True)
    plt.show()
Code Example #28
def plotdatatree(treeID, scale1, mass1):
	plot_title="Mass Accretion History Tree " + str(treeID)   #Can code the number in with treemax
	x_axis="scale time"
	y_axis="total mass"
	figure_name=os.path.expanduser('~/figureTree' + str(treeID))
	#Choose which type of plot you would like: Commented out.
	plt.plot(scale1, mass1, linestyle="-", marker="o")
	#plt.scatter(scale1, mass1, label="first tree")

	plt.title(plot_title)
	plt.xlabel(x_axis)
	plt.ylabel(y_axis)
	#plt.yscale("log")

	plt.savefig(figure_name)

	#In order to Plot only a single tree on a plot must clear lists before loop. 
	#Comment out to over plot curves.			
	plt.clf()

	clearmass = []
	clearscale = []

	return clearmass, clearscale
Code Example #29
def plot_roc_vs_speed(prop, start, end, interval, altitude, weight, power, rpm, \
    temp = 'std', temp_units = 'C', rv = '8',  wing_area = 110, \
    speed_units = 'kt', flap = 0):
    
    """
    Creates matplotlib plot of rate of climb vs speed.
    """
    import matplotlib
    import pylab
    EAS = pylab.arange(start, end, interval)
    ROC = []
    for speed in EAS:
        ROC.append(R.roc(prop, altitude, speed, weight, power, rpm, temp, \
            temp_units, rv, wing_area, speed_units, flap))
    pylab.plot(EAS, ROC)  # the top-level matplotlib module has no plot(); use the pylab interface imported above
    pylab.show()
Code Example #30
def main():
	path1 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_train.txt"
	path2 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_test.txt"

	C = [10**-4,10**-3,10**-2, 10**-1, 10**0, 10**1, 10**2, 10**3, 10**4]
	accuracy_train = []
	linear = SVM(path1 , path2)

	# for training accuracy
	for i in range (len(C)):
		m, p = linear.classifier_train(0, C[i])
		plot(m)  # plot() is assumed to come from an earlier star-import; it is not defined in this snippet
		svm_save_model('model{}'.format(i+1), m)
		accuracy_train.append(p)
	# This plots the training accuracy of the data set in SVM for all the given C's 
	plt.plot(accuracy_train)


	# for testing accuracy
	accuracy_test = []
	for i in range(len(C)):
		model_name = "model{}".format(i+1)
		p_acc = linear.classifier_test(1, model_name)
		# save the accuracies in a list
		accuracy_test.append(p_acc)

	# plots the testing accuracy of the dataset
	plt.plot(accuracy_test)


	# for validation accuracy
	accuracy_validation = []
	for i in range(len(C)):
		model_name = "model{}".format(i+1)
		p_acc = linear.classifier_test(2, model_name)
		# save the accuracies in a list
		accuracy_validation.append(p_acc)

	# plots the testing accuracy of the dataset
	plt.plot(accuracy_validation)

	plt.xlabel("C's", fontsize = 15)
	plt.ylabel(" Accuracy", fontsize = 15)
	plt.title("Accuracy Curve (SVM)", fontsize = 25)
	# plt.ylim([0.1, 0.8])
	plt.grid(True)
	plt.legend(['Training', ' Testing', 'Validation'])
	plt.show()  
Code Example #31
File: tools.py Project: sibofeng/PILAE
def draw_line_chart14(csvfile, savename, x_name, y_name1, y_name2):
    import matplotlib.pyplot as plt
    import pandas as pd
    csv = pd.read_csv(csvfile)
    x = csv[x_name]
    y1 = csv[y_name1]
    y2 = csv[y_name2]
    csv2 = pd.read_csv("../log/mnist_mapid_acc10.csv")
    y3 = csv2[y_name1]
    y4 = csv2[y_name2]

    plt.figure()
    plt.plot(x, y1, marker=".", label=y_name1)
    plt.plot(x, y2, marker=".", label=y_name2)
    plt.plot(x, y3, marker=".", label=y_name1 + " (mapid)")
    plt.plot(x, y4, marker='.', label=y_name2 + " (mapid)")

    plt.xlabel('number of maps')
    plt.ylabel('accuracy')
    plt.grid()
    plt.legend()
    plt.savefig('../eps/' + savename)
    plt.show()
Code Example #32
def print_lot_statistics(coefs, original, reconstructed):
    # statistics
    dict_to_return = calc_stats(coefs, original, reconstructed)
    print(dict_to_return)

    # plot
    plt.plot(original)
    plt.plot(reconstructed)
    plt.legend(('original', 'decompressed'))
    plt.title('Compressed vs decompressed block_size {0}'.format(i))  # `i` is assumed to come from the enclosing scope
    plt.show()

    plt.plot(coefs)
    plt.title('coefficient count={0}'.format(len(coefs)))
    plt.show()
    return dict_to_return
Code Example #33
    def Data(
        time, time1, time2
    ):  # give a class structure to put all function in it for caculating
        print("The Random Searching")
        n = [10, 100, 1000, 10000, 100000]
        average = 0
        for i in range(0, 5):
            num = random.randint(0, 1000000)
            time = searching.random_search(num)
            text = 'Trial' + str(i) + ' ' + str(time)
            print(text)
            average = average + time
        print("average " + str(average / 5))
        print('\n')
        plt.ylabel('The First Algorithm for time')
        plt.xlabel('Numbers')
        plt.plot(num, time)
        plt.show()

        print("The Linear Searching")
        average = 0  # reset the running total for this algorithm
        for i in range(0, 5):
            num = random.randint(0, 1000000)
            time1 = searching.linear_search(num)
            text1 = 'Trial' + str(i) + ' ' + str(time1)
            print(text1)
            average = average + time1
        print(n, time1)
        print("average" + str(average / 5))
        print('\n')
        plt.ylabel('The Second Algorithm for time')
        plt.xlabel('Numbers')
        plt.plot(num, time1)
        plt.show()

        print("The Binary Searching")
        average = 0  # reset the running total for this algorithm
        for i in range(0, 5):
            num = random.randint(0, 1000000)
            time2 = searching.binary_search(num)
            text2 = "Trial" + str(i) + ' ' + str(time2)
            print(text2)
            average = average + time2
        print("average" + str(average / 5))
        print('\n')
        plt.ylabel('The Third Algorithm for time')
        plt.xlabel('Numbers')
        plt.plot(num, time2)
        plt.show()
Code Example #34
def plot_binned_residuals(bin_df):
    '''
    Plotted binned residual averages and confidence intervals.
    
    ins
    --
    bin_df ie from bin_residuals(resid, var, bins)
    outs
    --
    pretty plots
    '''
    import matplotlib.pyplot as plt

    plt.plot(bin_df['var'], bin_df['resid'], '.')
    plt.plot(bin_df['var'], bin_df['lower_ci'], '-r')
    plt.plot(bin_df['var'], bin_df['upper_ci'], '-r')
    plt.axhline(0, color = 'gray', lw = .5)
Code Example #35
def animate_plotting(subdir_path, ):
    average_filename = 'averaged_out.txt'
    if os.path.exists(os.path.join(subdir_path, average_filename)):
        print(subdir_path + average_filename +
              ' already exists please use hotPlot.py')
        #import existing data for average at the end


#            data_out = numpy.genfromtxt(os.path.join(subdir_path,average_filename))
#            averaged_data = numpy.array(data_out[:,1])
#            angles = data_out[:,0]
#os.remove( os.path.join(subdir_path,average_filename))
    else:
        files = os.listdir(subdir_path)
        #files = [d for d in os.listdir(subdir_path) if os.path.isdir(os.path.join(subdir_path, d))]
        onlyfiles_path = [
            os.path.join(subdir_path, f) for f in files
            if os.path.isfile(os.path.join(subdir_path, f))
        ]
        onlyfiles_path = natsort.natsorted(onlyfiles_path)
        averaged_data = []
        angles = []
        for f in onlyfiles_path:
            data = numpy.genfromtxt(f, delimiter=',')
            #data = pandas.read_csv(f)
            averaged_data.append(numpy.mean(data))
            angle = os.path.basename(f).split('_')[0]
            angles.append(float(angle))
        fig = plt.plot(angles, averaged_data, 'o')
        plt.yscale('log')
        plt.xscale('log')
        plt.legend(loc='upper right')
        plt.title(base_path)  # base_path is assumed to be a module-level global
        plt.grid(True)
        plt.xlabel(r'$\theta$ $[deg.]}$')
        #plt.xlabel(r'$\mathrm{xlabel\;with\;\LaTeX\;font}$')
        plt.ylabel(r'I($\theta$) $[a.u.]$')
Code Example #36
File: ploting.py Project: zsc198841/KaggleAux
def plot_binned_residuals(bin_df):
    '''
    Plots the binned residual averages and confidence intervals of a binned dataframe.

    Parameters
    ----------
    bin_df : DataFrame
       the binned dataframe from bin_residuals(residuals, feature, bin_count).
    
    Returns
    -------
    matplotlib.figure :
        Plot of data frame residuals and confidence intervals.
    '''
    plt.plot(bin_df['var'], bin_df['resid'], '.')
    plt.plot(bin_df['var'], bin_df['lower_ci'], '-r')
    plt.plot(bin_df['var'], bin_df['upper_ci'], '-r')
    plt.axhline(0, color='gray', lw=0.5)
    return plt
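Both plot_binned_residuals variants consume a frame with 'var', 'resid', 'lower_ci', and 'upper_ci' columns produced by bin_residuals, which neither snippet shows; here is a hypothetical sketch of such a helper (the binning scheme and CI width are my assumptions, not the original's):

import numpy as np
import pandas as pd

def bin_residuals(resid, var, bins):
    # Hypothetical: average residuals within quantile bins of `var`,
    # with an approximate 95% CI (mean +/- 2 standard errors) per bin.
    df = pd.DataFrame({'var': var, 'resid': resid})
    df['bin'] = pd.qcut(df['var'], bins, duplicates='drop')
    g = df.groupby('bin', observed=True)
    out = g.agg(var=('var', 'mean'), resid=('resid', 'mean'))
    out['lower_ci'] = out['resid'] - 2 * g['resid'].sem()
    out['upper_ci'] = out['resid'] + 2 * g['resid'].sem()
    return out.reset_index(drop=True)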
Code Example #37
def fit_background(q, I):

    ## Working on background calculation
    ## mkak 2016.09.28

    x = q
    y = I
    pfit = np.polyfit(x, y, 4)
    yfit = np.polyval(pfit, x)
    #panel.plot(xrd_spectra[0], xrd_spectra[1]-yfit, label='no bkg')
    #panel.plot(xrd_spectra[0], yfit, color='blue', label='bkg')

    ### calculation works, but plotting here wipes previous plots - only shows last
    import matplotlib.pyplot as plt
    plt.figure()
    plt.plot(x, y, label='raw data')
    plt.plot(x, yfit, label='background')
    plt.plot(x, y - yfit, label='background subtracted')
    plt.legend()
    plt.show()
Code Example #38
File: kmeans.py Project: gbazack/Research_Papers
    def plot_board(self):
        X = self.X
        fig = plt.figure(figsize=(5, 5))
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)

        if self.mu and self.clusters:
            mu = self.mu
            clus = self.clusters
            K = self.K

            for m, clu in clus.items():
                cs = cm.nipy_spectral(1. * m / self.K)  # cm.spectral was renamed nipy_spectral in matplotlib 2.0
                plt.plot(mu[m][0],
                         mu[m][1],
                         'o',
                         marker='*',
                         markersize=12,
                         color=cs)
                pts = list(zip(*clus[m]))  # zip() is an iterator in Python 3, so materialize it first
                plt.plot(pts[0],
                         pts[1],
                         '.',
                         markersize=8,
                         color=cs,
                         alpha=0.5)

        else:
            pts = list(zip(*X))
            plt.plot(pts[0], pts[1], '.', alpha=0.5)

        if self.method == '++':
            tit = 'K-means++'
        else:
            tit = 'K-means with random initialization'

        pars = 'N=%s, K=%s' % (str(self.N), str(self.K))
        plt.title('\n'.join([pars, tit]), fontsize=16)
        plt.savefig('kpp_N%s_K%s.png' % (str(self.N), str(self.K)),
                    bbox_inches='tight',
                    dpi=200)
Code Example #39
	def plot_3df(df1,plot_title,x_axis,y_axis,plot,save):						# function for plotting high-dimension and low-dimension eigenvalues
		x1 = df1[df1.columns[0]]
		y1 = df1[df1.columns[1]]
		y2 = df1[df1.columns[2]]
		y3 = df1[df1.columns[3]]
		
		plt.figure(figsize=(8,8))
		plt.plot(x1, y1, color = 'red', label = df1.columns[1])
		plt.plot(x1, y2, color = 'blue', label = df1.columns[2])
		plt.plot(x1, y3, color = 'green', label = df1.columns[3])
		plt.grid(color = 'black', linestyle = '-', linewidth = 0.1)				# parameters for plot grid
		#plt.xticks(np.arange(0,max(x1)*1.1, int(max(x1)/10)))					# adjusting the intervals to 250
		#plt.yticks(np.arange(93, 100))
		plt.title(plot_title).set_position([0.5,1.05])
		plt.xlabel(x_axis)
		plt.ylabel(y_axis)			
		plt.legend(loc = 'best')												# creating legend and placing in at the top right
		if save == 'yes':
			plt.savefig(plot_title)
		if plot == 'yes':
			plt.show()
		plt.close()
Code Example #40
File: CNN.py Project: yanghoJI/Deepstudy
def plotdata(trl, tel, tea):
    xlist = range(len(trl))
    ax1 = plt.subplot(2, 1, 1)
    plt.plot(xlist, trl, 'r-', label='train loss')
    plt.plot(xlist, tel, 'b-', label='validation loss')
    plt.ylabel('loss value')
    plt.title('loss graph')
    plt.legend(loc=1)

    ax2 = plt.subplot(2, 1, 2)
    plt.plot(xlist, tea, 'b-', label='validation acc')
    #plt.ylim(0, 100)
    #plt.xlim(0, 100)
    plt.yticks(range(0, 101, 10))
    plt.grid(True)
    plt.ylabel('acc(%)')
    plt.title('acc graph')
    plt.legend(loc=1)

    plt.tight_layout()

    plt.savefig('batchNorWithxavier.png', dpi=300)
    plt.close()
Code Example #41
File: plotting.py Project: morey18k/QuasarLifetimes
def plot_boot(qstack):
    plt.figure()
    flux_covar = qstack.flux_covar

    std = np.sqrt(np.diagonal(flux_covar))
    ax = plt.axes(None, label=str(bin_size))  # bin_size is assumed to come from the enclosing scope

    num = 100
    ws_boot = qstack.ws_boot[:num]
    fs_boot = qstack.fs_boot[:num]

    plt.plot(ws_boot.T, fs_boot.T, alpha=0.1, color='orange')
    plt.plot(ws_boot[0],
             fs_boot[0],
             alpha=0.1,
             color='orange',
             label='Bootstrap Samples')
    plt.plot(qstack.wave_stack, qstack.flux_stack, label='Stacked Flux')
    #plt.title("Stacked Continuum Normalized Flux Near Ly-$\\alpha$ Transition")
    plt.xlabel("Wavelength (Angstroms)")
    plt.ylabel("Stacked Continuum Normalized Flux")
    plt.axvline(x=1215.67, color='red', linestyle='--')
    plt.legend()
Code Example #42
File: plot.py Project: worldofnick/Wikipedia-Mining
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.decomposition import TruncatedSVD


def load_sparse_csr(filename):
    loader = np.load(filename)
    return csr_matrix((loader['data'], loader['indices'], loader['indptr']),
                      shape=loader['shape'])


sparse_matrix = load_sparse_csr(
    '/home/crachmanin/Wikipedia-Mining/trigram_vectors/AS.npz')
small_matrix = sparse_matrix[:10, :]
reduced_data = TruncatedSVD(n_components=2).fit_transform(small_matrix)

plt.title('Article vectors reduced to d=2 by PCA')
plt.plot(reduced_data[:, 0], reduced_data[:, 1])
plt.savefig('pca.pdf')
Code Example #43
File: index.py Project: guruprasaad123/ml_for_life
                              steps_per_epoch=ntrain,
                              epochs=128,
                              validation_data=test_generator,
                              validation_steps=ntest,
                              callbacks=callbacks)
model.save_weights('model_weights.h5')
model.save('model.h5')

acc = history.history['acc']
loss = history.history['loss']
val_acc = history.history['val_acc']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'b', label='Training Accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation Accuracy')
plt.title('Training & Validation Accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'b', label='Training Loss')
plt.plot(epochs, val_loss, 'r', label='Validation Loss')
plt.title('Training & Validation Loss')
plt.legend()

plt.show()
'''
humane_or_not() method should be enough to classify whether the image is Humane or Not
Code Example #44
##Get unique counts for all variables
df.apply(pd.Series.value_counts)

##List of unique values
df["Col3"].unique()

'''Data Conversions'''
##DateTime variables
df['DateVar'] = pd.to_datetime(df['DateVar'])


'''Plotting Data'''
x_values = df['Col1']
y_values = df['Col2']
y2_values = df['Col2'] * 3 - 200

plt.plot(x_values,y_values, c = 'color', label = 'line name')
plt.plot(x_values,y2_values, c = 'color', label = 'line name')
plt.xticks(rotation = 90)
plt.xlabel("X Axis")
plt.ylabel("Y Axis")
plt.xlim(xlow, xhigh)   # pyplot uses xlim/ylim; set_xlim/set_ylim are Axes methods
plt.ylim(ylow, yhigh)
plt.legend(loc='upper right')
plt.title("Chart Title")
plt.tick_params(bottom="off", top="off", left="off", right="off")
plt.show()
Code Example #45
# IPython log file

import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv("http://www.biostat.jhsph.edu/~rpeng/useRbook/faithful.csv")
plt.plot(df["eruptions"], df["waiting"], "b.")
plt.title("eruptions vs waiting")
plt.savefig("scatter.png")
plt.clf()

plt.hist(df["eruptions"])
plt.savefig("eruptions.png")
plt.clf()

plt.hist(df["waiting"])
plt.savefig("waiting.png")
plt.clf()
Code Example #46
File: basicpost.py Project: c-pasillas/night2day
#from tensorflow import keras
#from tensorflow.keras import layers
import numpy as np
from keras.models import load_model
#data_file = Dataset(') #channels
#model_file = (')#ML model
#truth_file = () #DNB test data

data = np.load('/zdata2/cpasilla/TEST_data.npz') #"data_file"
model = load_model('/zdata2/cpasilla/JAN2020_ALL/OUTPUT/MODEL/model_C1_UNET_blocks_3_epochs_50.h5') # "model_file"
prediction = model.predict(data['Xdata_test'])
truth = data['Ydata_test']


###### load DNB radiances (Ydata_test) for the same set as Xdata_test  
# scatterplot 
import matplotlib.pyplot as plt
x=prediction
y=truth

plt.plot(x, y, 'o', color='black')
plt.show()
#calculate RMSE
from sklearn.metrics import mean_squared_error 
from math import sqrt

RMSD = sqrt(mean_squared_error(y,x))

print(RMSD)

Code Example #47
def graph_kmeans(state_seq):
    plt.plot(range(len(state_seq)), state_seq)
    plt.show()
Code Example #48
def main():

    average_mistakes_test = []
    average_mistakes_train = []

    p = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

    path1 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_train.txt"
    path2 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_test.txt"
    bc = BinaryClassifier(path1, path2)

    for i in range(len(p)):
        path1 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold{}_sm_train.txt".format(
            p[i])
        path2 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold{}_sm_test.txt".format(
            p[i])
        bc = BinaryClassifier(path1, path2)
        # bc.classifier_train_average_perceptron()
        # 1 for PA and 0 for Perceptron
        # bc.classifier_train(0)
        bc.classifier_train_average_perceptron()
        # print (bc.w)
        # print (bc.mistakes)
        average_mistakes_train.append(bc.mistakes_train)
        average_mistakes_test.append(bc.mistakes_test)

    average_mistakes_train = np.array(np.mean(average_mistakes_train, axis=0),
                                      dtype=float)
    average_mistakes_train = np.array(np.subtract(len(bc.train_list_word),
                                                  average_mistakes_train),
                                      dtype=float)
    average_mistakes_train = np.array(np.divide(average_mistakes_train,
                                                len(bc.train_list_word)),
                                      dtype=float)
    plt.plot(average_mistakes_train)

    average_mistakes_test = np.array(np.mean(average_mistakes_test, axis=0),
                                     dtype=float)
    average_mistakes_test = np.array(np.subtract(len(bc.test_list_word),
                                                 average_mistakes_test),
                                     dtype=float)
    average_mistakes_test = np.array(np.divide(average_mistakes_test,
                                               len(bc.test_list_word)),
                                     dtype=float)
    plt.plot(average_mistakes_test)

    # average_mistakes_test = np.array(50, dtype = 'f')
    # average_mistakes_train = []

    # path1 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_train.txt"
    # path2 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_test.txt"
    # bc = BinaryClassifier(path1, path2)
    # bc.classifier_train(0)
    # average_mistakes_test = np.array((np.subtract(len(bc.test_list_word), bc.mistakes_test)), dtype = float)
    # average_mistakes_test = (average_mistakes_test / len(bc.test_list_word))
    # plt.plot(average_mistakes_test)

    # average_mistakes_test = np.array(50, dtype = 'f')
    # average_mistakes_train = []

    # path1 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_train.txt"
    # path2 = "/home/goelshivam12/Desktop/ML_Homework/HW#1/OCRdata/ocr_fold0_sm_test.txt"
    # bc = BinaryClassifier(path1, path2)
    # bc.classifier_train(1)
    # average_mistakes_test = np.array(np.subtract(len(bc.test_list_word), bc.mistakes_test), dtype = float)
    # average_mistakes_test = (average_mistakes_test / len(bc.test_list_word))
    # plt.plot(average_mistakes_test)

    plt.xlabel("Number of iterations", fontsize=15)
    plt.ylabel(" Accuracy", fontsize=15)
    plt.title("Averaged Perceptron (Training vs Testing)", fontsize=25)
    plt.grid(True)
    plt.legend(['A.Perceptron (Training)', 'A. Perceptron (Testing)'])
    plt.show()
Code Example #49
plt.rc('ytick', labelsize=13)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fig = plt.figure()  # create a figure object
for ii in range(Nobj):  # looping over objects
    if args.skip and ii <= args.skip: continue
    line = buffer[ii + 1].split()
    yvals = line[1:Nbins + 1]
    proball = np.add(proball,
                     np.array(list(map(float, yvals))))  # adding all probabilities together; map() needs list() in Python 3
    plotname = args.outputdir + 'probplot' + line[0]
    if args.verbose:
        print(':: ' + sys.argv[0] + ' :: Creating figure ' + plotname)
    fig.clf()  # clearing figure
    ax = fig.add_subplot(1, 1, 1)  # create an axes object in the figure
    plt.plot(xvals, yvals)
    ax.grid(True, linestyle='-', color='0.75')
    ax.set_xlabel('z')
    ax.set_ylabel('P$(z)$')
    ax.set_title(fieldnameTEX)
    fig.savefig(plotname + '.png')
    if args.eps: fig.savefig(plotname + '.eps')
    if args.pdf: fig.savefig(plotname + '.pdf')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if not args.skip:  # only create probplot all if skip keyword is not given
    plotname = args.outputdir + 'probplotALL'
    if args.verbose:
        print(':: ' + sys.argv[0] + ' :: Creating figure ' + plotname)
    fig = plt.figure()  # create a figure object
    fig.clf()  # clearing figure
    ax = fig.add_subplot(1, 1, 1)  # create an axes object in the figure
Code Example #50
def main():

    #### LOAD FACE DATA
    face_data, face_label = load_face_data('face(1).mat')

    #### PARTITION DATA INTO TRAIN AND TEST SET
    X_train, X_test, Y_train, Y_test = partition_data(face_data,
                                                      face_label,
                                                      show='no')

    #### OBTAIN ORIGINAL AND NORMALIZED FEATURE VECTORS
    original_train, norm_train = get_original_normalized_feature_vectors(
        X_train, show='no')
    original_test, norm_test = get_original_normalized_feature_vectors(
        X_test, show='no')

    #### DISTANCE DEFINITIONS
    L1_NN = NearestNeighbors(n_neighbors=200, metric='minkowski',
                             p=1)  #manhattan l1
    L2_NN = NearestNeighbors(n_neighbors=200, metric='minkowski',
                             p=2)  #euclidean l2
    Linf_NN = NearestNeighbors(n_neighbors=200,
                               metric='chebyshev')  #chesboard/chebyshev linf

    earthmover = NearestNeighbors(n_neighbors=200,
                                  metric=wasserstein_distance)  #wassterstein
    intersection = NearestNeighbors(
        n_neighbors=200, metric=histogram_intersection)  #intersection
    chisquare = NearestNeighbors(n_neighbors=200, metric=chi)
    kldiv = NearestNeighbors(n_neighbors=200, metric=kl)
    js = NearestNeighbors(n_neighbors=200, metric=distance.jensenshannon)

    #### HISTOGRAM
    A_test = []
    for i in range(0, X_test.shape[1]):
        A_test.append(X_test[:, i])

    A_train = []
    for i in range(0, X_train.shape[1]):
        A_train.append(X_train[:, i])

    bin_width = 10
    intensity_max = 255
    n_bins = math.ceil(intensity_max / bin_width)

    bin_list = np.arange(0, 270, 10).tolist()  # Create a bin list from 0-260
    # It was found empirically that test images' pixel intensities ranged from 0 to ~260
    # Assuming uniform quantisation

    print("List of bins:", '\n', bin_list, '\n')

    X_hist_test = []
    for i in range(0, X_test.shape[1]):
        X_hist, bins, patches = plt.hist(A_test[i], bins=bin_list)
        X_hist_test.append(X_hist)
        plt.close()

    X_hist_train = []
    for j in range(0, X_train.shape[1]):
        X_hist, bins, patches = plt.hist(A_train[j], bins=bin_list)
        X_hist_train.append(X_hist)
        plt.close()
    plt.close()

    X_hist_test = np.asarray(X_hist_test)
    X_hist_train = np.asarray(X_hist_train)

    methods = [
        L2_NN, L1_NN, Linf_NN, earthmover, intersection, chisquare, kldiv, js
    ]
    method_name = [
        'L2', 'L1', 'Linf_NN', 'Earthmover', 'Intersection', 'Chi-Square',
        'K-L Divergence', 'JS'
    ]
    test_datas = [X_hist_test]
    train_datas = [X_hist_train]
    test_name = ['Histogram']
    M_pca_list = [16, 32, 64, 128, 256]
    M_pca_list = [4, 8, 12, 16, 22, 26]  # max of 26 as there are 26 bins
    data_type = [0, 1]

    recall_levels = 11
    M_lda = 10
    lda = LinearDiscriminantAnalysis(n_components=M_lda)

    method_count = 0
    for method in methods:

        #for test_data in test_datas:
        #for type in data_type:

        Mpca_list = []
        mAP_pca_list = []
        mAP_lda_list = []

        acc1_pca_list = []
        acc1_lda_list = []

        acc10_pca_list = []
        acc10_lda_list = []

        for M_pca in M_pca_list:

            #pca = PCA(n_components=M_pca)
            #lda = LinearDiscriminantAnalysis(n_components=M_lda)

            #test_pca = pca.fit_transform(test_data)
            #test_lda = lda.fit_transform(test_pca, Y_test)

            pca = PCA(n_components=M_pca)
            #train_pca = pca.fit_transform(train_datas[0])
            #test_pca = pca.transform(test_datas[0])

            train_pca = pca.fit_transform(X_hist_train)
            test_pca = pca.transform(X_hist_test)

            train_lda = lda.fit_transform(train_pca, Y_train)
            test_lda = lda.transform(test_pca)

            method.fit(test_pca)
            method_nbrs_pca = np.asarray(method.kneighbors(test_pca))
            method_map_pca, method_df_pca, acc1_pca, acc10_pca = calculate_map(
                method_nbrs_pca, Y_test, recall_levels)

            method.fit(test_lda)
            method_nbrs_lda = np.asarray(method.kneighbors(test_lda))
            method_map_lda, method_df_lda, acc1_lda, acc10_lda = calculate_map(
                method_nbrs_lda, Y_test, recall_levels)

            #print(method_name[method_count],test_name[name_count],", Mpca =",M_pca,"PCA mAP:",method_map_pca)
            #print(method_name[method_count],test_name[name_count],", Mpca =",M_pca,"PCA-LDA mAP:",method_map_lda)

            print(method_name[method_count], ", Mpca =", M_pca, "PCA mAP:",
                  method_map_pca, ",Acc@1:", acc1_pca, ",Acc@10:", acc10_pca)
            print(method_name[method_count], ", Mpca =", M_pca, "PCA-LDA mAP:",
                  method_map_lda, ",Acc@1:", acc1_lda, ",Acc@10:", acc10_lda)

            Mpca_list.append(M_pca)
            mAP_pca_list.append(method_map_pca)
            mAP_lda_list.append(method_map_lda)

            acc1_pca_list.append(acc1_pca)
            acc1_lda_list.append(acc1_lda)
            acc10_pca_list.append(acc10_pca)
            acc10_lda_list.append(acc10_lda)

        x1 = Mpca_list
        y1 = mAP_pca_list
        y2 = mAP_lda_list
        y3 = acc1_pca_list
        y4 = acc1_lda_list
        y5 = acc10_pca_list
        y6 = acc10_lda_list

        plt.figure(figsize=(10, 10))

        plt.plot(x1, y1, color='red', label='PCA mAP', marker='o')
        plt.plot(x1, y2, color='red', label='PCA-LDA mAP', marker='x')

        plt.plot(x1, y3, color='blue', label='PCA Acc@rank1', marker='o')
        plt.plot(x1, y4, color='blue', label='PCA-LDA Acc@rank1', marker='x')

        plt.plot(x1, y5, color='green', label='PCA Acc@rank10', marker='o')
        plt.plot(x1, y6, color='green', label='PCA-LDA Acc@rank10', marker='x')

        plt.grid(color='black', linestyle='-',
                 linewidth=0.1)  # parameters for plot grid
        title_name = str(method_name[method_count] + " " + test_name[0] +
                         ' PCA and PCA-LDA Performance')
        plt.title(title_name).set_position([0.5, 1.05])
        plt.xlabel('Mpca')
        plt.ylabel('mAP, Accuracy')
        plt.legend(loc='best')
        '''
		for i, txt in enumerate(y1):
			plt.annotate(txt, (x1[i], y1[i]))
		for i, txt in enumerate(y2):
			plt.annotate(txt, (x1[i], y2[i]))	
		'''

        plt.savefig(title_name)
        #plt.show()
        plt.close()

        print("		")
        method_count = method_count + 1
Code Example #51
for i in range(2):
    mean_tpr += interp(all_fpr, fpr[i], tpr[i])

# Finally average it and compute AUC
mean_tpr /= 2

fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"],
         tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
         ''.format(roc_auc["micro"]),
         color='deeppink',
         linestyle=':',
         linewidth=4)

plt.plot(fpr["macro"],
         tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
         ''.format(roc_auc["macro"]),
         color='navy',
         linestyle=':',
         linewidth=4)

colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(2), colors):
    plt.plot(fpr[i],
Code Example #52
x = dataset.iloc[:, 3:5].values

#using elbow method to find optimal number of clusters
from sklearn.cluster import KMeans

wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i,
                    init='k-means++',
                    max_iter=300,
                    n_init=10,
                    random_state=0)
    kmeans.fit(x)
    wcss.append(kmeans.inertia_)

plt.plot(range(1, 11), wcss)
plt.title('elbow method')
plt.xlabel('no of clusters')
plt.ylabel('wcss')
plt.show()

#aplying k means to dataset
kmeans = KMeans(n_clusters=5,
                init='k-means++',
                max_iter=300,
                n_init=10,
                random_state=0)
y_kmeans = kmeans.fit_predict(x)

#visualizing the clusters
plt.scatter(x[y_kmeans == 0, 0],
Code Example #53
partial_y_train = y_train[10000:]
#Not sure how many epochs to give...currently giving 10
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=10,
                    batch_size=512,
                    validation_data=(x_val, y_val))
print(history.history.keys())
history_dict = history.history
loss_val = history_dict['loss']
vald_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
vald_acc_values = history_dict['val_accuracy']
epochs = range(1, len(acc_values) + 1)
#plotting the graph of validation and training loss
plt.plot(epochs, loss_val, 'bo', label='Training loss')
plt.plot(epochs, vald_loss_values, 'b', label='Validation loss')
plt.title('Training and validation')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

#for graph of validation and training accuracy
'''
plt.plot(epochs,acc_values,'bo',label = 'Training accuracy')
plt.plot(epochs,vald_acc_values,'b',label = 'Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
'''
Code Example #54
                            'Set point': SetPoint[p]
                        })
                    time.sleep(1)

                    p += 1  # Increment set point index

                    if p >= 5:  # if all set points have been tested the test has been completed
                        print(
                            'Test complete. Generated .CSV file can be found at:  '
                        )
                        end = time.time()
                        print('Test runtime: ',
                              (end - start))  # print out run time

                        y = array(REF_LIST)
                        plt.plot(y)
                        plt.ylabel('Temperature')  # plot reference temp list
                        plt.show()

                        ser_bath.reset_input_buffer()
                        time.sleep(30)

                        ser_bath.write(
                            b's=20\r\n'
                        )  # set the bath temp to room temp to prevent overloading the heater/ cooler
                        time.sleep(30)
                        ex = input(
                            'To exit the program press any key'
                        )  # The program will hang here until user input is entered
                        exit()
Code Example #55
File: train_recognizer.py Project: phucph/Tracking
#     # validation_steps=len(val_ds) // BATCH_SIZE,
#     epochs=10,
#     # max_queue_size=BATCH_SIZE * 2,
#     callbacks=callbacks, verbose=1)

epochs = 10
# Plot
import matplotlib.pyplot as plt

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
Code Example #56
def main():
    #Loading the graph
    epinions = snap.LoadEdgeList(snap.PNGraph, "soc-Epinions1.txt", 0, 1)
    pr = PageRank(epinions, 0.8, 0.001)

    #calling page rank function
    #print pr
    #getting number of strongly connected components in the graph
    scc = snap.GetMxScc(epinions)

    #Storing SCC nodes id's in an array
    sccNodes = []
    for nodes in scc.Nodes():
        sccNodes.append(nodes.GetId())
    #storing total nodes
    nodeList = []
    for node in epinions.Nodes():
        nodeList.append(node.GetId())

    rankDesc = []
    rankIds = []
    #Computing top rank nodes
    for index, element in enumerate(pr):
        b, c = element
        rankDesc.append(b)
        rankIds.append(nodeList[index])

    rankDesc.sort(reverse=True)
    rankIds.sort(reverse=True)

    topRankNodes = rankDesc[0:10]
    topIds = rankIds[0:10]
    print("Top Rank Nodes: ", topRankNodes)
    # Number of incoming edges (indegree of x)
    #Ranks of all the source pages having hyperlinks toward x
    for index, element in enumerate(topIds):

        currentNode = epinions.GetNI(topIds[index])
        x = currentNode.GetInDeg()

        for i in range(x):
            innerNode = currentNode.GetInNId(i)
            indi = nodeList.index(innerNode)
            ele = pr[indi]
            print("In Degree: ", innerNode, "w.r.t. node: ", x, "Rank: ", ele)

    #printing number of strongly connected components in the graph
    print("Number of nodes in SCC: ", scc.GetNodes())

    #Applying a BFS to get the Out Set from node 1
    BfsOutSet = snap.GetBfsTree(epinions, sccNodes[0], True, False)
    #storing Out Set nodes in an array
    bfsOutNodes = []
    for nodes in BfsOutSet.Nodes():
        if (nodes.GetId() not in sccNodes):
            bfsOutNodes.append(nodes.GetId())
    #removing the SCC to get the Out Set Nodes
    for outNode in BfsOutSet.Nodes():
        if outNode.GetId() in sccNodes:
            BfsOutSet.DelNode(outNode.GetId())
    print("Number of OutSet Nodes: ", BfsOutSet.GetNodes())
    #applying BFS search to find the tendrils in the Out Set
    outSetTen = snap.GetBfsTree(BfsOutSet, bfsOutNodes[0], False, True)
    print("Tendrils in OutSet: ", outSetTen.GetNodes())
    #storing out set tendrils in an array to use it later
    outTendrils = []
    for node in outSetTen.Nodes():
        outTendrils.append(node.GetId())
    #applying BFS to get in set nodes
    BfsInSet = snap.GetBfsTree(epinions, sccNodes[0], False, True)
    #storing In Set nodes in an array
    bfsInNodes = []
    for nodes in BfsInSet.Nodes():
        if (nodes.GetId() not in sccNodes):
            bfsInNodes.append(nodes.GetId())
    #removing the SCC to get the Out Set Nodes
    for inNode in BfsInSet.Nodes():
        if inNode.GetId() in sccNodes:
            BfsInSet.DelNode(inNode.GetId())
    print("Number of InSet Nodes: ", BfsInSet.GetNodes(), "clone:", len(bfsInNodes))
    #applying BFS search to find the tendrils in the In Set
    inSetTen = snap.GetBfsTree(BfsInSet, bfsInNodes[0], False, True)
    print("Tendrils in InSet: ", inSetTen.GetNodes())
    #storing in set tendrils in an array to use it later
    inTendrils = []
    for node in inSetTen.Nodes():
        inTendrils.append(node.GetId())
    #tubes in a SCC
    tubeNodes = []
    for nodes in inSetTen.Nodes():
        if nodes in outSetTen.Nodes():
            tubeNodes.append(nodes.GetId())
    print("Tubes in SCC: ", len(tubeNodes))
    #storing disconnected region in an array
    disComp = []
    for nodes in epinions.Nodes():
        if (nodes.GetId() not in sccNodes) and (
                nodes.GetId()
                not in bfsOutNodes) and (nodes.GetId() not in bfsInNodes) and (
                    nodes.GetId() not in inTendrils) and (nodes.GetId()
                                                          not in outTendrils):
            disComp.append(nodes.GetId())
    print("Number of Disconnected Components: ", len(disComp))
    probabilities, nodes = Random(epinions, 5)
    plt.plot(nodes, probabilities)
    plt.xlabel('No of Nodes')
    plt.ylabel('Probability that path exists')
    plt.show()
Code Example #57
import matplotlib
matplotlib.use("Agg")  # select the headless Agg backend before importing pyplot
import matplotlib.pyplot as plt

squares = [1, 4, 9, 16, 25]
plt.plot(squares)
plt.show()  # a no-op under Agg; see the savefig variant below
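Because Agg cannot open a window, a hedged one-line alternative (the file name is illustrative) writes the figure to disk instead:

plt.savefig("squares.png")  # Agg renders the current figure straight to a file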
コード例 #58
0
ファイル: logistic_np.py プロジェクト: ductnn/Logistic
def plot_loss(all_loss):
    # assumes matplotlib.pyplot is imported as plt elsewhere in logistic_np.py
    plt.figure(1)  # reuse figure 1 so repeated calls redraw the same figure
    plt.clf()      # clear the previous curve before plotting the new one
    plt.plot(all_loss)
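A hedged usage sketch (the import and the loss values are illustrative; the original logistic_np.py presumably does its own pyplot import):

import matplotlib.pyplot as plt

all_loss = [0.92, 0.61, 0.47, 0.39, 0.35]  # e.g. loss recorded after each epoch
plot_loss(all_loss)
plt.show()  # or plt.savefig('loss.png') on a headless machine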
コード例 #59
0
def show_overfitting(request):
    dic = {}

    # Experiment 1: compare cross-validation results across models

    # Load the wine dataset with load_wine
    wine_data = datasets.load_wine()
    # print(wine_data.feature_names)
    data_input = wine_data.data
    data_output = wine_data.target

    rf_class = RandomForestClassifier()
    lr_class = LogisticRegression()
    svm_class = svm.LinearSVC()

    # print(cross_val_score(rf_class, data_input, data_output, scoring='accuracy', cv=4))

    # 1. Accuracy with a random forest
    accuracy_rf = cross_val_score(
        rf_class, data_input, data_output, scoring='accuracy',
        cv=10).mean() * 100
    print('Accuracy of Random Forest is:', accuracy_rf)

    # 2. Accuracy with a linear SVM
    accuracy_svm = cross_val_score(
        svm_class, data_input, data_output, scoring='accuracy',
        cv=10).mean() * 100
    print('Accuracy of SVM is:', accuracy_svm)

    # 3. Accuracy with logistic regression
    accuracy_lr = cross_val_score(
        lr_class, data_input, data_output, scoring='accuracy',
        cv=10).mean() * 100
    print('Accuracy of LogisticRegression is:', accuracy_lr)

    rcParams['figure.figsize'] = 12, 10

    x = np.array([1.4 * i * np.pi / 180 for i in range(0, 300, 4)])
    np.random.seed(20)  # fix the seed so the generated noise is reproducible
    y = np.sin(x) + np.random.normal(0, 0.2, len(x))
    data = pd.DataFrame(np.column_stack([x, y]), columns=['x', 'y'])
    plt.plot(data['x'], data['y'], '.')
    file = "static/img/han01.png"
    dic["pic1"] = "/" + file
    plt.savefig(file)
    # plt.show()

    for i in range(2, 16):  # power of 1 is already there
        colname = 'x_%d' % i  # new var will be x_power
        data[colname] = data['x']**i
    # print(data.head())

    def linear_regression(data, power, models_to_plot):
        # initialize predictors:
        predictors = ['x']
        if power >= 2:
            predictors.extend(['x_%d' % i for i in range(2, power + 1)])

        # Fit the model
        linreg = LinearRegression(normalize=True)  # note: normalize= was removed in scikit-learn 1.2
        linreg.fit(data[predictors], data['y'])
        y_pred = linreg.predict(data[predictors])

        # Check if a plot is to be made for the entered power
        if power in models_to_plot:
            plt.subplot(models_to_plot[power])
            plt.tight_layout()
            plt.plot(data['x'], y_pred)
            plt.plot(data['x'], data['y'], '.')
            plt.title('Plot for power: %d' % power)

        # Return the result in the pre-defined format
        rss = sum((y_pred - data['y'])**2)
        ret = [rss]
        ret.extend([linreg.intercept_])
        ret.extend(linreg.coef_)
        return ret

    col = ['rss', 'intercept'] + ['coef_x_%d' % i for i in range(1, 16)]
    ind = ['model_pow_%d' % i for i in range(1, 16)]
    coef_matrix_simple = pd.DataFrame(index=ind, columns=col)
    # Note: the keyword above must be 'columns' (plural); with 'column' the
    # plots below would not render
    # Map polynomial degree to a subplot position
    models_to_plot = {1: 231, 3: 232, 6: 233, 8: 234, 11: 235, 14: 236}

    # Fit and plot each polynomial degree
    for i in range(1, 16):
        coef_matrix_simple.iloc[i - 1, 0:i + 2] = linear_regression(
            data, power=i, models_to_plot=models_to_plot)
    file = "static/img/han02.png"
    dic["pic2"] = "/" + file
    plt.savefig(file)
    # plt.show()


    def ridge_regression(data, predictors, alpha, models_to_plot={}):
        # Fit the model: 1. configure, 2. fit, 3. predict
        ridgereg = Ridge(alpha=alpha, normalize=True)
        ridgereg.fit(data[predictors], data['y'])
        # predictors holds column names of the DataFrame 'data'
        y_pred = ridgereg.predict(data[predictors])

        # Check if a plot is to be made for the entered alpha
        if alpha in models_to_plot:
            plt.subplot(models_to_plot[alpha])
            plt.tight_layout()
            plt.plot(data['x'], y_pred)  # plot the fitted curve
            plt.plot(data['x'], data['y'], '.')  # scatter the noisy samples
            plt.title('Plot for alpha: %.3g' % alpha)

        # Return the result in pre-defined format
        rss = sum((y_pred - data['y'])**2)
        ret = [rss]
        ret.extend([ridgereg.intercept_])
        ret.extend(ridgereg.coef_)
        return ret

    predictors = ['x']
    predictors.extend(['x_%d' % i for i in range(2, 16)])

    # Set the different values of alpha to be tested
    alpha_ridge = [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20]

    # Initialize the dataframe for storing coeficients
    col = ['rss', 'intercept'] + ['coef_x_%d' % i for i in range(1, 16)]
    ind = ['alpha_%.2g' % alpha_ridge[i] for i in range(0, 10)]
    coef_matrix_ridge = pd.DataFrame(index=ind, columns=col)

    models_to_plot = {
        1e-15: 231,
        1e-10: 232,
        1e-4: 233,
        1e-3: 234,
        1e-2: 235,
        5: 236
    }
    for i in range(10):
        coef_matrix_ridge.iloc[i, ] = ridge_regression(data, predictors,
                                                       alpha_ridge[i],
                                                       models_to_plot)
    file = "static/img/han03.png"
    dic["pic3"] = "/" + file
    plt.savefig(file)

    # plt.show()

    def lasso_regression(data, predictors, alpha, models_to_plot={}):
        # Fit the model
        lassoreg = Lasso(alpha=alpha, normalize=True, max_iter=1e5)
        lassoreg.fit(data[predictors], data['y'])
        y_pred = lassoreg.predict(data[predictors])
        # Check if a plot is to be made for the entered alpha
        if alpha in models_to_plot:
            plt.subplot(models_to_plot[alpha])
            plt.tight_layout()
            plt.plot(data['x'], y_pred)
            plt.plot(data['x'], data['y'], '.')
            plt.title('Plot for alpha: %.3g' % alpha)

        # Return the result in pre-defined format
        rss = sum((y_pred - data['y'])**2)
        ret = [rss]
        ret.extend([lassoreg.intercept_])
        ret.extend(lassoreg.coef_)
        return ret

    predictors = ['x']
    predictors.extend(['x_%d' % i for i in range(2, 16)])

    # Define the alpha values to test
    alpha_lasso = [1e-15, 1e-10, 1e-8, 1e-5, 1e-4, 1e-3, 1e-2, 1, 5, 10]

    # Initialize the dataframe to store coefficients
    col = ['rss', 'intercept'] + ['coef_x_%d' % i for i in range(1, 16)]
    ind = ['alpha_%.2g' % alpha_lasso[i] for i in range(0, 10)]
    coef_matrix_lasso = pd.DataFrame(index=ind, columns=col)

    # Define the models_to_plot
    models_to_plot = {
        1e-10: 231,
        1e-5: 232,
        1e-4: 233,
        1e-3: 234,
        1e-2: 235,
        1: 236
    }

    # Iterate over the 10 alpha values:
    for i in range(10):
        coef_matrix_lasso.iloc[i, ] = lasso_regression(data, predictors,
                                                       alpha_lasso[i],
                                                       models_to_plot)
    file = "static/img/han04.png"
    dic["pic4"] = "/" + file
    plt.savefig(file)
    # plt.show()
    return render(request, "overfitting.html", dic)
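A short, hedged follow-up that would make the regularization effect visible in the coefficient tables built above (it would sit inside show_overfitting, just before the return; only pandas display calls, no new fitting):

    pd.options.display.float_format = '{:,.2g}'.format
    print(coef_matrix_ridge)                     # ridge shrinks coefficients smoothly as alpha grows
    print((coef_matrix_lasso == 0).sum(axis=1))  # lasso zeroes coefficients outright (sparsity)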
コード例 #60
0
def plottMiddel(start, end, N, T_S):
    # N equal filter taps of weight 1/N (float literal so Python 2 does not
    # truncate the division to zero)
    hList = [1.0 / N for k in range(N)]
    fList = range(start, end, 1)
    HList = fillList(fList, hList, N, T_S)  # fillList is sketched below
    plt.plot(fList, HList)
    return 0
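fillList() is not defined in this snippet. A hypothetical sketch, assuming hList is an N-tap moving-average filter and fillList evaluates the magnitude of its frequency response H(f) at each frequency in fList with sample period T_S (this DSP interpretation and every name below are assumptions):

import cmath

def fillList(fList, hList, N, T_S):
    # H(f) = sum_k h[k] * exp(-j*2*pi*f*k*T_S); return |H(f)| per frequency
    HList = []
    for f in fList:
        H = sum(hList[k] * cmath.exp(-2j * cmath.pi * f * k * T_S)
                for k in range(N))
        HList.append(abs(H))
    return HList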