Code Example #1
File: emp_model.py Project: gcallah/Indra
 def draw(self):
     """
     Draw a network graph of the employee relationship.
     """
     if self.graph is not None:
         nx.draw_networkx(self.graph)
         plt.show()
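
For context, a minimal usage sketch: the method assumes the enclosing object keeps a networkx graph in self.graph, so a hypothetical stand-in class is enough to run it (EmpModel and its edges below are illustrative assumptions, not part of the Indra source):

import matplotlib.pyplot as plt
import networkx as nx

class EmpModel:
    # hypothetical stand-in for the class this method belongs to
    def __init__(self):
        self.graph = nx.Graph()
        self.graph.add_edges_from([("manager", "alice"), ("manager", "bob")])

    def draw(self):
        # same body as the snippet above
        if self.graph is not None:
            nx.draw_networkx(self.graph)
            plt.show()

EmpModel().draw()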
Code Example #2
File: learn.py Project: timjong93/MachineLearning
def showScatterPlot(data, labels, idx1, idx2): 
    import matplotlib.pyplot as plt
    import numpy as np  # needed for np.array below
    fig = plt.figure() 
    ax = fig.add_subplot(111) 
    # X-axis data, Y-axis data, Size for each sample, Color for each sample 
    ax.scatter(data[:,idx1], data[:,idx2], 100.0*(1 + np.array(labels)), 100.0*(1 + np.array(labels))) 
    plt.show()
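
A hedged usage sketch: the function indexes a 2-D array by column and sizes/colours points by label, so synthetic data of the right shape is enough to exercise it (the variable names below are illustrative):

import numpy as np

data = np.random.rand(50, 3)           # 50 samples, 3 features
labels = np.random.randint(0, 3, 50)   # three integer classes
showScatterPlot(data, labels, 0, 1)    # scatter feature 0 against feature 1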
Code Example #3
def genCurve(dataSet, tree):
	x = [] # stores the x axis of the graph
	trainList = [] # the list of accuracies derived from training data
	valList = [] # the list of accuracies derived from validation data
	i = 0
	while i < 1: 
		i = i+0.1
		a = 0
		b = 0
		for trial in range(3):
			newData = sortData(dataSet, i) # MAKE THIS
			tree = getTree(newData) # NEED TO GET THIS FUNCTION WHEN TREEGEN WORKS
			a = a + model_validation.validateTree(tree, newData)
			b = b + model_validation.validateTree(tree, newData)  # NOTE: identical to the line above, so train and validation accuracies come out equal; one call presumably should use held-out data
		a = float(a)/3
		b = float(b)/3

		trainList.append(a)
		valList.append(b)
		x.append(i)

	plt.plot(x, trainList)
	plt.plot(x, valList)
	plt.xlabel('percent training used')
	plt.ylabel('percent accuracy')
	plt.title('learning curve')
	plt.show()
Code Example #4
def plotRaster(clustaArray=[]):

    if len(clustaArray) < 1:
        print "Nothing to plot!"
    else:
        # create list of times that maps to each spike
        p_sptimes = []
        for a in clustaArray:
            for b in a.spike_samples:
                p_sptimes.append(b)
        sptimes = np.array(p_sptimes)

        p_clusters = []
        for c in clustaArray:
            for d in c.id_of_spike:
                p_clusters.append(c.id_of_clusta)
        clusters = np.array(p_clusters)

        # dynamically generate cluster list
        clusterList = []
        for a in clustaArray:
            clusterList.append(a.id_of_clusta)
        # plot raster for all clusters

        # nclusters = 20

        # #for n in range(nclusters):
        timesList = []
        for n in clusterList:
            # if n<>9:
            ctimes = sptimes[clusters == n]
            timesList.append(ctimes)
            plt.plot(ctimes, np.ones(len(ctimes)) * n, "|")
        plt.show()
Code Example #5
def plotRetinaSpikes(retina=None, label=""):
    
    assert retina is not None, "Network is not initialised! Visualising failed."
    import matplotlib.pyplot as plt
    from matplotlib import animation
    
    print "Visualising {0} Spikes...".format(label) 

    spikes = [x.getSpikes() for x in retina]
#     print spikes
    
    sortedSpikes = sortSpikes(spikes)
#     print sortedSpikes
    
    framesOfSpikes = generateFrames(sortedSpikes)
#     print framesOfSpikes
    
    x = range(0, dimensionRetinaX)
    y = range(0, dimensionRetinaY)
    from numpy import meshgrid
    rows, pixels = meshgrid(x,y)
    
    fig = plt.figure()
    
    initialData = createInitialisingData()
    
    imNet = plt.imshow(initialData, cmap='Greens', interpolation='none', origin='upper')  # 'green' is not a valid matplotlib colormap name; 'Greens' presumably intended
    
    plt.xticks(range(0, dimensionRetinaX)) 
    plt.yticks(range(0, dimensionRetinaY))
    args = (framesOfSpikes, imNet)
    anim = animation.FuncAnimation(fig, animate, fargs=args, frames=int(simulationTime)*10, interval=30)
          
    plt.show()
Code Example #6
def plotColorCodedNetworkSpikes(network):
    assert network is not None, "Network is not initialised! Visualising failed."
    import matplotlib.pyplot as plt  # was "import matplotlib as plt", which has no figure()/imshow()
    from matplotlib import animation  # needed for FuncAnimation below
    from NetworkBuilder import sameDisparityInd
    
    cellsOutSortedByDisp = []
    spikes = []
    for disp in range(0, maxDisparity+1):
        cellsOutSortedByDisp.append([network[x][2] for x in sameDisparityInd[disp]])
        spikes.append([x.getSpikes() for x in cellsOutSortedByDisp[disp]])
    
    sortedSpikes = sortSpikesByColor(spikes)
    print(sortedSpikes)
    framesOfSpikes = generateColoredFrames(sortedSpikes)
    print(framesOfSpikes)
    
    fig = plt.figure()
    
    initialData = createInitialisingDataColoredPlot()
    
    imNet = plt.imshow(initialData[0], cmap=plt.cm.coolwarm, interpolation='none', origin='upper')  # NOTE: imshow() has no 'c' keyword; per-pixel colours must be encoded in the image array itself
    
    plt.xticks(range(0, dimensionRetinaX)) 
    plt.yticks(range(0, dimensionRetinaY))
    plt.title("Disparity Map {0}".format(disparity))
    args = (framesOfSpikes, imNet)
    anim = animation.FuncAnimation(fig, animate, fargs=args, frames=int(simulationTime)*10, interval=30)
          
    plt.show()
Code Example #7
File: analysis.py Project: tacaswell/xpdAcq
def plot_images(headers):
    ''' function to plot images from headers.

    It plots images, returns nothing
    Parameters
    ----------
        headers : databroker header object, or a list of them
            header(s) pulled out from central file system
    '''
    # prepare headers (the original signature took "header" but the body used "headers")
    if type(list(headers)[1]) == str:
        header_list = list()
        header_list.append(headers)
    else:
        header_list = headers

    for header in header_list:
        uid = header.start.uid
        img_field = _identify_image_field(header)
        imgs = np.array(get_images(header, img_field))
        print('Plotting your data now...')
        for i in range(imgs.shape[0]):
            img = imgs[i]
            plot_title = '_'.join([uid, str(i)])  # str.join takes a single iterable
            # just display user uid and index of this image
            try:
                fig = plt.figure(plot_title)
                plt.imshow(img)
                plt.show()
            except:
                pass # allow matplotlib to crash without stopping other function
Code Example #8
File: learn.py Project: timjong93/MachineLearning
def showScatterPlot(data, idx1, idx2): 
    import matplotlib.pyplot as plt 
    fig = plt.figure() 
    ax = fig.add_subplot(111) 
    
    # X-axis data, Y-axis data, Size for each sample, Color for each sample 
    ax.scatter(data[:,idx1], data[:,idx2]) 
    plt.show()
Code Example #9
    def draw(self):
        positions = {}
        Tree.get_positions(self, positions, x=(0, 10), y=(0, 10))
        g = self.to_graph()

        plt.axis('on')
        nx.draw_networkx(g, positions, node_size=1500, font_size=24, node_color='g')
        plt.show()
Code Example #10
File: p1q2.py Project: masbicudo/Trabalhos-UFRJ
def graficolog():
    ax = plt.gca()
    ax.set_yscale('log')
    plt.plot(k, eff, 'go')
    plt.plot(k, eaf, 'ro')
    plt.plot(k, ebf, 'bo')
    
    plt.grid(True)
    plt.show()
Code Example #11
File: p1q2.py Project: masbicudo/Trabalhos-UFRJ
def grafico(e):
    plt.plot(k, np.repeat(dfa, N+1), 'k-')
    plt.plot(k, vff, 'go')
    plt.plot(k, vaf, 'ro')
    plt.plot(k, vbf, 'bo')
    
    plt.axis([0, N, dfa - e, dfa + e])
    
    plt.grid(True)
    plt.show()
Code Example #12
File: pandasCore.py Project: dsmiff/pandasUtils
 def plotDataFrame(self, variables):
     try:
         import matplotlib.pyplot as plt
     except ImportError:
         print "Unable to import matplotlib"
     plt.plot(self.df[variables[0]], self.df[variables[1]])
     plt.xlabel(r"{}".format(variables[0]))
     plt.ylabel(r"$P$")
     plt.minorticks_on()
     plt.show()
Code Example #13
File: stereocam.py Project: jwaixs/opensportfit
def epipolar_geometry(frame1, frame2):
    #sift = cv2.SIFT()

    # Find the keypoints and descriptors with SIFT
    #kp1, des1 = sift.detectAndCompute(frame1, None)
    #kp2, des2 = sift.detectAndCompute(frame2, None)

    # Trying ORB instead of SIFT
    orb = cv2.ORB()

    kp1, des1 = orb.detectAndCompute(frame1, None)
    kp2, des2 = orb.detectAndCompute(frame2, None)

    des1, des2 = map(numpy.float32, (des1, des2))

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0 
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good, pts1, pts2 = [], [], []

    # Ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8*n.distance:
            good.append(m)
            pts1.append(kp1[m.queryIdx].pt)
            pts2.append(kp2[m.trainIdx].pt)

    pts1 = numpy.float32(pts1)
    pts2 = numpy.float32(pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    # select only the inlier points (the original returned F, mask here,
    # which made everything below unreachable; the return is now at the end)
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img1, _ = drawlines(frame1, frame2, lines1, pts1, pts2)
    
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 2, F)  # NOTE: the OpenCV tutorial passes image index 1 for this call
    lines2 = lines2.reshape(-1, 3)
    img2, _ = drawlines(frame2, frame1, lines2, pts2, pts1)

    matplotlib.pyplot.subplot(121)
    matplotlib.pyplot.imshow(img1)
    matplotlib.pyplot.subplot(122)
    matplotlib.pyplot.imshow(img2)
    matplotlib.pyplot.show()  # the matplotlib package itself has no show()

    return F, mask
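
A hedged usage sketch (the image paths are placeholders, and drawlines comes from elsewhere in the same project). Note also that cv2.ORB() is the OpenCV 2.4 constructor; on OpenCV 3 and later the equivalent is cv2.ORB_create():

import cv2

frame1 = cv2.imread('left.png', cv2.IMREAD_GRAYSCALE)
frame2 = cv2.imread('right.png', cv2.IMREAD_GRAYSCALE)
F, mask = epipolar_geometry(frame1, frame2)  # shows both epiline images
print(F)  # 3x3 fundamental matrix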
Code Example #14
File: muscl_hanc.py Project: jhansel/radhydro
def plotSolutions(x,states=None): #Default func values is trivial

    plt.figure(figsize=(11,8.5))

    #get the exact values
    f = open('exact_results.txt', 'r')
    x_e = []
    u_e = []
    p_e = []
    rho_e = []
    e_e = []
    for line in f:
        if len(line.split())==1:
            t = line.split()
        else:
            data = line.split()
            x_e.append(float(data[0]))
            u_e.append(float(data[1]))
            p_e.append(float(data[2]))
            rho_e.append(float(data[4]))
            e_e.append(float(data[3]))


    if states is None:
        raise ValueError("Need to pass in states")
    else:
        u = []
        p = []
        rho = []
        e = []
        for i in states:
            u.append(i.u)
            p.append(i.p)
            rho.append(i.rho)
            e.append(i.e)

    #get edge values
    x_cent = [0.5*(x[i]+x[i+1]) for i in range(len(x)-1)]
    
    if u is not None:
        plot2D(x_cent,u,"$u$",x_ex=x_e,y_ex=u_e)

    if rho is not None:
        plot2D(x_cent,rho,r"$\rho$",x_ex=x_e,y_ex=rho_e)

    if p is not None:
        plot2D(x_cent,p,r"$p$",x_ex=x_e,y_ex=p_e)

    if e is not None:
        plot2D(x_cent,e,r"$e$",x_ex=x_e,y_ex=e_e)

    plt.show(block=False) #show all plots generated to this point
    raw_input("Press anything to continue...")
    plot2D.fig_num=0
Code Example #15
File: compress.py Project: JonasSejr/MLAU
def compress_kmeans(im, k=4):
    height, width, depth = im.shape

    data = im.reshape((height * width, depth))
    labels, centers = kmeans(data, k, 1e-2)
    rep = closest(data, centers)
    data_compressed = centers[rep]

    im_compressed = data_compressed.reshape((height, width, depth))
    plt.figure()
    plt.imshow(im_compressed)
    plt.show()
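
A hedged usage sketch; kmeans and closest are project-local helpers not shown here, and the image path is a placeholder:

import matplotlib.pyplot as plt

im = plt.imread('photo.png')  # H x W x 3 (or 4) float array
compress_kmeans(im, k=8)      # displays the image quantised to 8 colours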
Code Example #16
File: plot.py Project: kleikanger/vmc
def plot(data):
	fig = plt.figure(figsize=plt.figaspect(2.))
	ax = fig.add_subplot(2, 1, 1)
	ax.set_ylabel(r'$\alpha$',size=20)
	ax.set_xlabel('$N_C$',size=20)
	l = ax.plot(data[:,0],data[:,3],'r')
	l = ax.plot(data[:,0],data[:,4],'k--')
	ax = fig.add_subplot(2, 1, 2)
	ax.set_ylabel(r'$\beta$',size=20)      
	ax.set_xlabel('$N_C$',size=20)
	l = ax.plot(data[:,0],data[:,1],'r')
	l = ax.plot(data[:,0],data[:,2],'k--')
	plt.show()
Code Example #17
def plotHistogram(clustaArray=[]):
    if len(clustaArray) < 1:
        print "Nothing to plot!"
    else:
        # create list of times that maps to each spike
        p_sptimes = []
        for a in clustaArray:
            for b in a.spike_samples:
                p_sptimes.append(b)
        sptimes = np.array(p_sptimes)

        p_clusters = []
        for c in clustaArray:
            for d in c.id_of_spike:
                p_clusters.append(c.id_of_clusta)
        clusters = np.array(p_clusters)

        # dynamically generate cluster list
        clusterList = []
        for a in clustaArray:
            clusterList.append(a.id_of_clusta)

        # plot raster for all clusters
        # nclusters = 20

        # #for n in range(nclusters):
        timesList = []
        for n in clusterList:
            # if n<>9:
            ctimes = sptimes[clusters == n]
            timesList.append(ctimes)
            # plt.plot(ctimes, np.ones(len(ctimes))*n, '|')
        # plt.show()

        # plot frequency in Hz over time
        dt = 1 / 30000.0  # in seconds
        binSize = 1  # in seconds
        binSizeSamples = round(binSize / dt)
        recLen = np.max(sptimes)
        nbins = round(recLen / binSizeSamples)

        binCount = []
        cluster = 3
        for b in np.arange(0, nbins - 1):
            n = np.sum((timesList[cluster] > b * binSizeSamples) & (timesList[cluster] < (b + 1) * binSizeSamples))
            binCount.append(n / binSize)  # makes Hz

        plt.plot(binCount)
        plt.ylim([0, 20])
        plt.show()
Code Example #18
def plot_confusion_matrix(cm, labels, title='Confusion matrix', cmap=plt.cm.Blues, save=False):
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(labels))
    plt.xticks(tick_marks, labels, rotation=45)
    plt.yticks(tick_marks, labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
    if save:
        plt.savefig(save)
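
A hedged usage example, assuming numpy and matplotlib.pyplot are imported as np and plt at module level and that scikit-learn is available to build the matrix:

from sklearn.metrics import confusion_matrix

y_true = [0, 1, 2, 2, 1, 0]
y_pred = [0, 2, 2, 2, 1, 0]
cm = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(cm, labels=['cat', 'dog', 'bird'], save='cm.png')

One caveat about the function itself: it calls plt.show() before plt.savefig(save), so with a blocking backend the saved file can end up blank once the window is closed; saving before showing is the safer order.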
Code Example #19
def plotDisparityHistogram(network=None):
    assert network is not None, "Network is not initialised! Visualising failed."
    import matplotlib.pyplot as plt
    from NetworkBuilder import sameDisparityInd
    
    spikesPerDisparityMap = []
    for d in range(0, maxDisparity-minDisparity+1):
        cellsOut = [network[x][1] for x in sameDisparityInd[d]]
        spikesPerDisparityMap.append(sum([sum(x.get_spike_counts().values()) for x in cellsOut]))
    
    print(spikesPerDisparityMap)
    
    plt.bar(range(0, maxDisparity-minDisparity+1), spikesPerDisparityMap, align='center')
    
    plt.show()
Code Example #20
def lets_paint_the_world(filename):
    # (the original signature took "file" but the body opened "filename")
    filee = open(filename)

    lon = []
    lat = []
    depth = []

    counter = 0

    for line in filee.readlines():  # set a counter because computer couldn't run the whole file
        if counter < 1000:
            each_line = line.split()
            lon.append(float(each_line[0]))
            lat.append(float(each_line[1]))
            depth.append(float(each_line[2]))
            counter += 1

    m = Basemap(projection='tmerc',
                llcrnrlon=-180,
                urcrnrlon=180,
                llcrnrlat=-90,
                urcrnrlat=90,
                lat_0=0,
                lon_0=0)

    # creates the graticule based on the coordinates
    x, y = m(*np.meshgrid(lon, lat))  # <----------BREAKING POINT

    # plot commands
    fig = plt.figure(figsize=(10, 7))
    ax = fig.add_subplot(111)

    # draws
    m.fillcontinents(color='coral', lake_color='aqua')
    m.drawcoastlines(linewidth=.25)
    m.drawcountries(linewidth=.25)
    m.drawmeridians(np.arange(0, 360, 15))
    m.drawparallels(np.arange(-90, 90, 15))
    # NOTE: pcolormesh expects a 2-D array for the colour data; the flat depth
    # list (and the stray positional list in the original call) will not work
    plt.pcolormesh(x, y, depth, cmap=plt.cm.RdBu_r, vmin=-100, vmax=100)
    plt.show()
Code Example #21
def plotWaveforms(clustaArray=[]):
    if len(clustaArray) < 1:
        print "Nothing to plot!"
    else:
        clustaToPlot = int(raw_input("Please enter the cluster id to plot: "))

        i = 0
        while i < len(clustaArray):
            if clustaToPlot == clustaArray[i].id_of_clusta:
                j = 0
                while j < len(clustaArray[i].waveforms):
                    k = 0
                    while k < len(clustaArray[i].waveforms[j]):
                        plt.plot([k], [clustaArray[i].waveforms[j][k]], "ro")
                        k = k + 1
                    j = j + 1
            i = i + 1
        plt.show()
Code Example #22
File: plotter.py Project: simphys/exercises
 def make(self, ncols = 2, swindow = (17,10), sfile = (8,3)):
     if self.__nr_show == 1: 
         f, axarr = p.subplots(1, 1, figsize=swindow)
         axarr = np.array([axarr])
     elif self.__nr_show > 1:
         nrows = int(np.ceil(1.*self.__nr_show/ncols))
         f, axarr = p.subplots(nrows, ncols, figsize=swindow)
     nplots = 0
     for n,plot in enumerate(self.__plots):
         if plot.show:
             self.__show(n,plot,axarr.flatten()[nplots])
             nplots += 1
         if plot.save: 
             self.__save(n,plot,sfile)
     if self.__nr_show > 0:
         f.tight_layout()
         p.show()
     self.__reset()
Code Example #23
File: xrd_calc.py Project: bruceravel/xraylarch
def fit_background(q,I):
    
    ## Working on background calculation
    ## mkak 2016.09.28
    
    x = q
    y = I
    pfit = np.polyfit(x,y,4)
    yfit = np.polyval(pfit,x)
    #panel.plot(xrd_spectra[0], xrd_spectra[1]-yfit, label='no bkg')
    #panel.plot(xrd_spectra[0], yfit, color='blue', label='bkg')
    
    ### calculation works, but plotting here wipes previous plots - only shows last
    import matplotlib.pyplot as plt  # was "import matplotlib as plt", which has no figure()/plot()
    plt.figure()
    plt.plot(x,y,label='raw data')
    plt.plot(x,yfit,label='background')
    plt.plot(x,y-yfit,label='background subtracted')
    plt.legend()
    plt.show()
Code Example #24
File: agent.py Project: airjulio/NanoMachineLearning
def run_grid_search(plot_heatmap=False):
    """Run the agent for a finite number of trials."""

    alpha_params = [1.0, .9, .8, .7, .6, .5, .4, .3, .2, .1, 0]
    epsilon_params = [1.0, .9, .8, .7, .6, .5, .4, .3, .2, .1, 0]
    results = {}
    table = list()
    for alpha in alpha_params:
        for epsilon in epsilon_params:
            # Set up environment and agent
            e = Environment()  # create environment (also adds some dummy traffic)
            a = e.create_agent(LearningAgent)  # create agent
            e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
            # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

            # Now simulate it
            sim = Simulator(e, update_delay=0.0,
                            display=False)  # create simulator (uses pygame when display=True, if available)
            # NOTE: To speed up simulation, reduce update_delay and/or set display=False
            trials = 1000
            a.alpha = alpha
            a.epsilon = epsilon
            sim.run(n_trials=trials)  # run for a specified number of trials
            # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
            # for i, state in enumerate(a.qtable.items()):
            #     print i, state[0]
            #     pprint.pprint(state[1])
            # NOTE: on Python 2, sim.success / trials is integer division;
            # float(sim.success) / trials gives the intended rate
            print('Success rate after {} trials: {}. Initial alpha = {} and initial epsilon = {}'
                  .format(trials, sim.success / trials, alpha, epsilon))
            table.append([alpha, epsilon, sim.success / trials])
            results['a:{} e:{}'.format(alpha, epsilon)] = sim.success / trials
    if plot_heatmap:
        import pandas as pd
        import seaborn as sns
        import matplotlib.pyplot as plt  # was "import matplotlib as plt", which has no show()
        gs_data = pd.DataFrame(table, columns=['alpha', 'epsilon', 'rate'])
        gs_data_pivot = gs_data.pivot('alpha', 'epsilon', 'rate')
        sns.heatmap(gs_data_pivot)
        plt.show()
    pprint.pprint(sorted(results.items(), key=operator.itemgetter(1)))
Code Example #25
File: lag_hydro.py Project: jhansel/radhydro
def plotSolutions(x,u=None,rho=None,p=None,e=None): #Default func values is trivial


    plt.figure(figsize=(11,8.5))

    #get edge values
    x_edge = [0.5*(x[i]+x[i+1]) for i in range(len(x)-1)]

    #get the exact values
    f = open('exact_results.txt', 'r')
    x_e = []
    u_e = []
    p_e = []
    rho_e = []
    e_e = []
    for line in f:
        if len(line.split())==1:
            t = line.split()
        else:
            data = line.split()
            x_e.append(float(data[0]))
            u_e.append(float(data[1]))
            p_e.append(float(data[2]))
            rho_e.append(float(data[4]))
            e_e.append(float(data[3]))
    
    if u is not None:
        plot2D(x,u,x_e,u_e,"$u$")

    if rho is not None:
        plot2D(x_edge,rho,x_e,rho_e,r"$\rho$")

    if p is not None:
        plot2D(x_edge,p,x_e,p_e,r"$p$")

    if e is not None:
        plot2D(x_edge,e,x_e,e_e,r"$e$")

    plt.show(block=False) #show all plots generated to this point
    raw_input("Press anything to continue...")
Code Example #26
def plotDisparityMap(network=None, disparity=0):
    
    assert network is not None, "Network is not initialised! Visualising failed."
    assert disparity >= 0 and disparity <= maxDisparity, "No such disparity map in the network."
    import matplotlib.pyplot as plt
    from matplotlib import animation
    from NetworkBuilder import sameDisparityInd
    
    print "Visualising results for disparity value {0}...".format(disparity) 
    
    cellsOut = [network[x][2] for x in sameDisparityInd[disparity]]

    spikes = [x.getSpikes() for x in cellsOut]
#     print spikes
    
    sortedSpikes = sortSpikes(spikes)
#     print sortedSpikes
    
    framesOfSpikes = generateFrames(sortedSpikes)
#     print framesOfSpikes
    
    x = range(0, dimensionRetinaX)
    y = range(0, dimensionRetinaY)
    from numpy import meshgrid
    rows, pixels = meshgrid(x,y)
    
    fig = plt.figure()
    
    initialData = createInitialisingData()
#     print initialData
    imNet = plt.imshow(initialData, cmap='gray', interpolation='none', origin='upper')
    
    plt.xticks(range(0, dimensionRetinaX)) 
    plt.yticks(range(0, dimensionRetinaY))
    plt.title("Disparity Map {0}".format(disparity))
    args = (framesOfSpikes, imNet)
    anim = animation.FuncAnimation(fig, animate, fargs=args, frames=int(simulationTime)*10, interval=30)
          
    plt.show()
Code Example #27
File: PCA.py Project: zhibzeng/PythonCode
def execute():
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    x = random.normal(5, .5, 1000)
    y = random.normal(3, 1, 1000)
    a = x*cos(pi/4) + y*sin(pi/4)
    b = -x*sin(pi/4) + y*cos(pi/4)
    plt.plot(a, b, '.')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('原数据集')  # "original data set"
    data = zeros((1000, 2))
    data[:, 0] = a
    data[:, 1] = b
    x, y, evals, evecs = pca(data, 1)
    print(y)
    plt.figure()
    plt.plot(y[:, 0], y[:, 1], '.')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('重新构造数据')  # "reconstructed data"
    plt.show()
Code Example #28
File: gmapper.py Project: SkBlaz/GeneMapper
    def display(self, data, candidates, fname, display):
        
        finallist=[]
        for c in candidates:
            finallist.append(c[0])
        #print finallist
        part1 = finallist[:len(finallist)//2]  # // keeps this integer division working on Python 3
        part2 = finallist[len(finallist)//2:]
        
        meandiff=int(np.sqrt(np.power(np.mean(part2),2)-np.power(np.mean(part1),2)))
        rangeA = max(part1)-min(part1)
        rangeB = max(part2)-min(part2)
        span = int((rangeA+rangeB)/2)
        dspan = int(meandiff/span)
        theta = float(meandiff/(rangeA+rangeB))
        oneortwo=""
        if dspan >3 and meandiff > 20 or meandiff>36:
            oneortwo = "Two distributions \n\n MD: %d \n Span: %d \n Dspan: %d \n theta: %d" % (meandiff, span, dspan, theta) 
        else:
            oneortwo = "One distribution \n\n MD: %d \n Span: %d \n Dspan: %d \n theta: %d" % (meandiff, span, dspan, theta)

        cans = np.array(candidates)
        plt.plot(cans[:,0],cans[:,1],'ro')
        plt.axhline(max(cans[:,1])/4, color='r')
        plt.axhline(max(cans[:,1]/2), color='r')
        plt.axhline(int(max(cans[:,1]))*0.75, color='r')
        red_patch = mpatches.Patch(color='red', label='75%, 50% and 25% \nof maximum frequency')
        plt.legend(handles=[red_patch])
        plt.ylabel('Frequency of occurence')
        plt.xlabel('separate items')
        plt.title('Frequency distribution estimation graph: %s' %(fname))
        plt.text(max(data)*1.1, max(cans[:,1])*0.62, oneortwo, fontsize = 11, color = 'r')
        plt.hist(data,range(int(min(data)),int(max(data)),1))
        ofile = fname[0:-3]+"png"
        print ("Writing outfile: %s") % (ofile)
        plt.savefig(ofile, bbox_inches='tight')
        if display == True: 
            plt.show()
        return
Code Example #29
def show_analysis():
    plt.bar(flower_labels, predictions[0])
    plt.xticks(flower_labels)
    plt.show()
Code Example #30
Xpa = np.load('npyData/07_07/set_all_patches_X.npy')

print(' Patch set loaded, Xpa.shape ', Xpa.shape)

# Display a number of randomly chosen images
hp = Xpa.shape[1]; wp = Xpa.shape[2]
print(' Image resolution: ', hp, 'x', wp)
print(' Displaying (random) images from the loaded data set: ')
cols = 4; line = 4; pl = 1;
fig = plt.figure(figsize=(cols*1.2,line*1.8))
for i in range(0, line):
    for j in range (0, cols):
        fig.add_subplot(line,cols, (i*cols+j+1)); #pl=pl+1;
        rdmImg = int(random()* Xpa.shape[0])
        plt.imshow(Xpa[rdmImg], cmap='gray'); #plt.title(str([rdmImg]))
plt.show();

# In[] Texture descriptors - ferigarea (ferning)
# Extracting the feature vector
import cv2
from skimage.filters import frangi
from pyimagesearch.localbinarypatterns import LocalBinaryPatterns
#from PIL import Image

desc = LocalBinaryPatterns(26, 8)

from sklearn.cluster import KMeans
md_km_q = KMeans(n_clusters = 4, random_state=0)


Code Example #31
File: Tests.py Project: Ymerlet/RBD-Fast
def rbdfast_test():
    """
    ISHIGAMI function
    Crestaux et al. (2007) and Marrel et al. (2009) use: a = 7 and b = 0.1.
    Sobol' & Levitan (1999) use: a = 7 and b = 0.05.
    """
    a = 7
    b = 0.05
    pi = np.math.pi

    def f(X): return np.sin(X[:, 0]) + a * \
        np.sin(X[:, 1])**2 + b * X[:, 2]**4 * np.sin(X[:, 0])
    ninput = 3  # def: X=[x1,x2,x3] -> xi=U(-pi,pi)
 
    E = a / 2  # ??? what is E used for ???
    Vx1 = 1 / 2 * (1 + b * pi**4 / 5)**2
    Vx2 = a**2 / 8
    Vx13 = b**2 * pi**8 / 225
    V = Vx1 + Vx2 + Vx13
    exact = np.matrix([[Vx1 / V, 0, 0],
                       [0, Vx2 / V, 0],
                       [Vx13 / V, 0, 0]])
    exactDiag = exact.diagonal()

    #==================== Effect of bias ======================================
    SIc = np.zeros((ninput, 450))
    SI = np.zeros((ninput, 450))
    # warning('off','RBD:lowSampleSize') # DISABLE THE WARNING
    for N in range(50, 500):
        X = -pi + 2 * pi * np.random.rand(N, ninput)
        Y = f(X).reshape((f(X).shape[0], f(X)[0].size))
        tSI, tSIc = rbdfast.rbdfast(Y, x=X)
        SI[:, N - 50], SIc[:, N -
                           50] = tSI.reshape((1, 3)), tSIc.reshape((1, 3))
    # warning('on','RBD:lowSampleSize') # RE-ENABLE THE WARNING

    # Print plot : effect of bias
    plt.plot(SI.transpose(), 'r--')
    plt.plot(SIc.transpose(), 'b--')
    plt.plot([[exactDiag.item(i) for i in range(0, 3)] for k in range(0, 450)],
             color='#003366',
             linewidth=2.0)
    plt.title('Effect of bias')
    plt.ylabel('SI')
    plt.xlabel('Simulation Number')
    plt.show()
    
    """
    #==================== Effect of sample organisation========================
    SIc2 = np.zeros((ninput,450))
    #warning('off','RBD:lowSampleSize') #???????????
    for N in range(50,500):
        X = np.zeros((N,ninput))
        #N+1 values between -pi and +pi
        s0 = np.linspace(-pi,pi,N)
        # 3 random indices for sample size N
        Index = np.matrix([[randint(0,N-1) for z in range(0,ninput)]for n in range(0,N)])
        # Assigning values to the index -> "random" values between [-pi, pi[
        s = np.zeros((N,ninput))
        for line in range(N):
            s[line,:] = s0[Index[line]]
        # Uniform sampling in [0, 1]
        for line in range(N):
            for a in range(ninput):
                X[line,a] = .5 + np.math.asin(np.math.sin(s[line][a]))/pi
        # Rescaling the uniform sampling between [-pi, pi]
        X = -pi + 2*pi*X
        Y = f(X).reshape((f(X).shape[0],f(X)[0].size))
        tSIc = rbdfast(Y, Index = Index)[1]
        SIc2[:,N-50] = tSIc.reshape((1,3))
    #warning('on','RBD:lowSampleSize') #???????????????

    plt.plot(SIc,'b--')
    plt.plot(SIc2.transpose(),'r--')
    plt.plot([[exactDiag.item(i) for i in range(0,3)] for k in range(50,500)],
               color = '#003366',
               linewidth = 2.0)
    plt.title('Effect of sample organisation')
    plt.ylabel('SI')
    plt.xlabel('Simulation Number')
    plt.show()

    #======================== Effect of M value ===============================
    SIc = np.zeros((ninput,30))
    SI = np.zeros((ninput,30))
    X = -pi + 2*pi*np.random.rand(500,ninput)
    for M in range(1,30):
        SI[:,M],SIc[:,M] = rbdfast(f(X), X = X, M = M)

    plt.plot(SIc,'b')
    plt.plot(SI,'r')
    plt.plot([[exactDiag[i] for i in range(0,3)] for k in range(50,500)],'k')
    plt.title('Effect of the M value')
    plt.ylabel('SI')
    plt.xlabel('M value')

    log.logger.info('Tests done')"""
    return
Code Example #32
File: main.py Project: pilillo/nilmtk-greend-tests
best_devices = test_elec.submeters().select_top_k(k=5)

test_elec.mains().plot()

fhmm = fhmm_exact.FHMM()
fhmm.train(best_devices, sample_period=60)

# Save disaggregation to external dataset
#output = HDFDataStore('/home/andrea/Desktop/nilmtk_tests/redd.disag-fhmm.h5', 'w')
"""
fhmm.disaggregate(test_elec.mains(), output, sample_period=60)
output.close()

# read result from external file
disag_fhmm = DataSet(output)
disag_fhmm_elec = disag_fhmm.buildings[building].elec

disagg_fhmm.plot()
"""
"""
from nilmtk.metrics import f1_score
f1_fhmm = f1_score(disag_fhmm_elec, test_elec)
f1_fhmm.index = disag_fhmm_elec.get_labels(f1_fhmm.index)
f1_fhmm.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("FHMM");
"""
plt.show()

Code Example #33
def calculate_SQ(bandgap_ev,
                 temperature=300,
                 fr=1,
                 plot_current_voltage=False):
    """
    Args:
        bandgap_ev: bandga in electron-volt
        temperature: temperature in K
    Returns:
         
    """

    # Defining constants for tidy equations
    c = constants.c  # speed of light, m/s
    h = constants.h  # Planck's constant J*s (W)
    h_e = constants.h / constants.e  # Planck's constant eV*s
    k = constants.k  # Boltzmann's constant J/K
    k_e = constants.k / constants.e  # Boltzmann's constant eV/K
    e = constants.e  # Coulomb

    # Load the Air Mass 1.5 Global tilt solar spectrum
    solar_spectrum_data_file = str(
        os.path.join(os.path.dirname(__file__), "am1.5G.dat"))
    solar_spectra_wavelength, solar_spectra_irradiance = np.loadtxt(
        solar_spectrum_data_file, usecols=[0, 1], unpack=True, skiprows=2)

    solar_spectra_wavelength_meters = solar_spectra_wavelength * 1e-9

    # need to convert solar irradiance from Power/m**2(nm) into
    # photon#/s*m**2(nm) power is Watt, which is Joule / s
    # E = hc/wavelength
    # at each wavelength, Power * (wavelength(m)/(h(Js)*c(m/s))) = ph#/s
    solar_spectra_photon_flux = solar_spectra_irradiance * (
        solar_spectra_wavelength_meters / (h * c))

    ### Calculation of total solar power incoming
    power_in = simps(solar_spectra_irradiance, solar_spectra_wavelength)

    # calculation of blackbody irradiance spectra
    # units of W/(m**3), different than solar_spectra_irradiance!!! (This
    # is intentional, it is for convenience)
    blackbody_irradiance = (2.0 * h * c ** 2 /
                            (solar_spectra_wavelength_meters ** 5)) \
                           * (1.0 / ((np.exp(h * c / (
        solar_spectra_wavelength_meters * k * temperature))) - 1.0))

    # I've removed a pi in the equation above - Marnik Bercx

    # now to convert the irradiance from Power/m**2(m) into photon#/s*m**2(m)
    blackbody_photon_flux = blackbody_irradiance * (
        solar_spectra_wavelength_meters / (h * c))

    # absorbance interpolation onto each solar spectrum wavelength
    from numpy import interp

    # Get the bandgap in wavelength in meters
    bandgap_wavelength = h_e * c / bandgap_ev

    # Only take the part of the wavelength-dependent solar spectrum and
    # blackbody spectrum below the bandgap wavelength
    bandgap_index = np.searchsorted(solar_spectra_wavelength_meters,
                                    bandgap_wavelength)

    bandgap_irradiance = interp(np.array([
        bandgap_wavelength,
    ]), solar_spectra_wavelength_meters, solar_spectra_photon_flux)

    bandgap_blackbody = (2.0 * h * c ** 2 /
                            (bandgap_wavelength ** 5)) \
                           * (1.0 / ((np.exp(h * c / (
        bandgap_wavelength * k * temperature))) - 1.0)) * (
        bandgap_wavelength / (h * c))

    integration_wavelength = np.concatenate(
        (solar_spectra_wavelength_meters[:bandgap_index],
         np.array([
             bandgap_wavelength,
         ])),
        axis=0)

    integration_solar_flux = np.concatenate(
        (solar_spectra_photon_flux[:bandgap_index], bandgap_irradiance),
        axis=0)

    integration_blackbody = np.concatenate(
        (blackbody_photon_flux[:bandgap_index], np.array([bandgap_blackbody])),
        axis=0)

    #  Numerically integrating irradiance over wavelength array
    # Note: elementary charge, not math e!  ## units of A/m**2   W/(V*m**2)
    J_0_r = e * np.pi * simps(integration_blackbody, integration_wavelength)

    J_0 = J_0_r / fr

    #  Numerically integrating irradiance over wavelength array
    # elementary charge, not math e!  ### units of A/m**2   W/(V*m**2)
    J_sc = e * simps(integration_solar_flux * 1e9, integration_wavelength)

    #    J[i] = J_sc - J_0*(1 - exp( e*V[i]/(k*T) ) )
    #   #This formula from the original paper has a typo!!
    #    J[i] = J_sc - J_0*(exp( e*V[i]/(k*T) ) - 1)
    #   #Bercx chapter and papers have the correct formula (well,
    #   the correction on one paper)
    def J(V):
        J = J_sc - J_0 * (np.exp(e * V / (k * temperature)) - 1.0)
        return J

    def power(V):
        p = J(V) * V
        return p

    # A more primitive, but perfectly robust way of getting a reasonable
    # estimate for the maximum power.
    test_voltage = 0
    voltage_step = 0.001
    while power(test_voltage + voltage_step) > power(test_voltage):
        test_voltage += voltage_step

    max_power = power(test_voltage)

    # Calculate the maximized efficiency
    efficiency = max_power / power_in

    # This segment isn't needed for functionality at all, but can display a
    # plot showing how the maximization of power by choosing the optimal
    # voltage value works
    if plot_current_voltage:
        V = np.linspace(0, 2, 200)
        plt.plot(V, J(V))
        plt.plot(V, power(V), linestyle='--')
        plt.show()
        print(max_power)

    return efficiency
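
A hedged usage example, assuming the module-level names the function relies on (constants from scipy, simps from scipy.integrate, np, os, plt) and the bundled am1.5G.dat spectrum file are in place:

# efficiency limit for a 1.34 eV absorber, roughly the optimum bandgap
eff = calculate_SQ(1.34, temperature=300, plot_current_voltage=True)
print("Maximum efficiency: {:.1%}".format(eff))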
Code Example #34
def final_decision_plot(df,
                        z_s=10,
                        z_b=10,
                        show=False,
                        block=False,
                        trafoD_bins=True,
                        bin_number=15):
    """Plots histogram decision score output of classifier"""

    nJets = df['nJ'].tolist()[1]

    if trafoD_bins == True:
        bins, arg2, arg3 = trafoD_with_error(df)
        print(len(bins))
    else:
        bins = np.linspace(-1, 1, bin_number + 1)

    # Initialise plot stuff
    plt.ion()
    plt.close("all")
    fig = plt.figure(figsize=(8.5, 7))
    plot_range = (-1, 1)
    plot_data = []
    plot_weights = []
    plot_colors = []
    plt.rc('font', weight='bold')
    plt.rc('xtick.major', size=5, pad=7)
    plt.rc('xtick', labelsize=10)

    plt.rcParams["font.weight"] = "bold"
    plt.rcParams["axes.labelweight"] = "bold"
    plt.rcParams["mathtext.default"] = "regular"

    df = setBinCategory(df, bins)

    bins = np.linspace(-1, 1, len(bins))

    decision_value_list = df['bin_scaled'].tolist()
    post_fit_weight_list = df['post_fit_weight'].tolist()
    sample_list = df['sample'].tolist()

    # Get list of hists.
    for t in class_names_grouped[::-1]:
        class_names = class_names_map[t]
        class_decision_vals = []
        plot_weight_vals = []
        for c in class_names:
            for x in range(0, len(decision_value_list)):
                if sample_list[x] == c:
                    class_decision_vals.append(decision_value_list[x])
                    plot_weight_vals.append(post_fit_weight_list[x])

        plot_data.append(class_decision_vals)
        plot_weights.append(plot_weight_vals)
        plot_colors.append(colour_map[t])

    # Plot.
    if nJets == 2:
        multiplier = 20
    elif nJets == 3:
        multiplier = 100

    plt.plot([], [],
             color='#FF0000',
             label=r'VH $\rightarrow$ Vbb x ' + str(multiplier))

    plt.hist(plot_data,
             bins=bins,
             weights=plot_weights,
             range=plot_range,
             rwidth=1,
             color=plot_colors,
             label=legend_names[::-1],
             stacked=True,
             edgecolor='none')

    df_sig = df.loc[df['Class'] == 1]

    plt.hist(df_sig['bin_scaled'].tolist(),
             bins=bins,
             weights=(df_sig['post_fit_weight'] * multiplier).tolist(),
             range=plot_range,
             rwidth=1,
             histtype='step',
             linewidth=2,
             color='#FF0000',
             edgecolor='#FF0000')

    x1, x2, y1, y2 = plt.axis()
    plt.yscale('log', nonposy='clip')
    plt.axis((x1, x2, y1, y2 * 1.2))
    axes = plt.gca()
    axes.set_ylim([5, 135000])
    axes.set_xlim([-1, 1])
    x = [-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1]
    plt.xticks(x, x, fontweight='normal', fontsize=20)
    y = [r"10", r"10$^{2}$", r"10$^{3}$", r"10$^{4}$", r"10$^{5}$"]
    yi = [10, 100, 1000, 10000, 100000]
    plt.yticks(yi, y, fontweight='normal', fontsize=20)

    axes.yaxis.set_ticks_position('both')
    axes.yaxis.set_tick_params(which='major',
                               direction='in',
                               length=10,
                               width=1)
    axes.yaxis.set_tick_params(which='minor',
                               direction='in',
                               length=5,
                               width=1)

    axes.xaxis.set_ticks_position('both')
    axes.xaxis.set_tick_params(which='major',
                               direction='in',
                               length=10,
                               width=1)
    axes.xaxis.set_tick_params(which='minor',
                               direction='in',
                               length=5,
                               width=1)

    axes.xaxis.set_minor_locator(AutoMinorLocator(4))
    handles, labels = axes.get_legend_handles_labels()

    #Weird hack thing to get legend entries in correct order
    handles = handles[::-1]
    handles = handles + handles
    handles = handles[1:12]

    plt.legend(loc='upper right',
               ncol=1,
               prop={'size': 12},
               frameon=False,
               handles=handles)

    plt.ylabel("Events", fontsize=20, fontweight='normal')
    axes.yaxis.set_label_coords(-0.07, 0.93)
    plt.xlabel(r"BDT$_{VH}$ output", fontsize=20, fontweight='normal')
    axes.xaxis.set_label_coords(0.89, -0.07)
    an1 = axes.annotate("ATLAS Internal",
                        xy=(0.05, 0.91),
                        xycoords=axes.transAxes,
                        fontstyle='italic',
                        fontsize=16)

    offset_from = OffsetFrom(an1, (0, -1.4))
    an2 = axes.annotate(r'$\sqrt{s}$' + " = 13 TeV , 36.1 fb$^{-1}$",
                        xy=(0.05, 0.91),
                        xycoords=axes.transAxes,
                        textcoords=offset_from,
                        fontweight='normal',
                        fontsize=12)

    offset_from = OffsetFrom(an2, (0, -1.4))
    an3 = axes.annotate("1 lepton, " + str(nJets) + " jets, 2 b-tags",
                        xy=(0.05, 0.91),
                        xycoords=axes.transAxes,
                        textcoords=offset_from,
                        fontstyle='italic',
                        fontsize=12)

    offset_from = OffsetFrom(an3, (0, -1.6))
    an4 = axes.annotate("p$^V_T \geq$ 150 GeV",
                        xy=(0.05, 0.91),
                        xycoords=axes.transAxes,
                        textcoords=offset_from,
                        fontstyle='italic',
                        fontsize=12)

    plt.show(block=block)

    return fig, axes
Code Example #35
    indexes = []
    peaks = find_Peak(fft_y)
    i = 1
    outfile = open('result-12Peak.txt', 'w')

    outfile.write("\n")
    outfile.write("Point_Count: %s \n" % len(fft_y))
    outfile.write('ID  N     Value' + "\n")
    outfile.write('--- ----- ---------------------------\n')

    print("\n")
    print("Point_Count: ", len(fft_y), "\n")
    print('ID  N     Value')
    print('--- ----- ---------------------------')
    for posOrNegPeaks in peaks:
        for peak in posOrNegPeaks:
            indexes.append(peak[0])
            if i >= 2 and i < 13:
                print('%-3i %-5i %s' % (i, peak[0], abs(peak[1])))
                outfile.write('%-3i %-5i %s \n' % (i, peak[0], abs(peak[1])))
            i += 1

    print('--- ----- ---------------------------')
    outfile.write('--- ----- ---------------------------')

    # draw the third plot
    plot_peaks(abs(fft_y), np.array(indexes), algorithm='FFT_PeakLoad')

    pylab.show()
Code Example #36
def performace(transactions, strategy):
    # strategy = pdatas.copy();
    # strategy is a DataFrame with 8 columns: 1: time  2: macro  3: price CLOSE  4: position  5: flag  6: ret  7: nav  8: benchmark

    N = 250

    # annualized return
    rety = strategy.nav[strategy.shape[0] - 1]**(N / strategy.shape[0]) - 1
    bench_rety = strategy.benchmark[strategy.shape[0] -
                                    1]**(N / strategy.shape[0]) - 1

    # Sharpe ratio
    Sharp = (strategy.ret * strategy.position).mean() / (
        strategy.ret * strategy.position).std() * np.sqrt(N)

    # win rate
    VictoryRatio = ((transactions.pricesell - transactions.pricebuy) >
                    0).mean()

    DD = 1 - strategy.nav / strategy.nav.cummax()
    MDD = max(DD)

    # strategy performance year by year ??????
    # strategy['date']=strategy.index.copy()
    # strategy['date']=pd.DataFrame(strategy['date']).to_datetime()
    #
    # # strategy['year'] = strategy.DateTime.apply(lambda x:x[:4])
    nav_peryear = strategy.nav.groupby(
        strategy.year).last() / strategy.nav.groupby(strategy.year).first() - 1
    benchmark_peryear = strategy.benchmark.groupby(strategy.year).last(
    ) / strategy.benchmark.groupby(strategy.year).first() - 1

    excess_ret = nav_peryear - benchmark_peryear
    result_peryear = pd.concat([nav_peryear, benchmark_peryear, excess_ret],
                               axis=1)
    result_peryear.columns = ['strategy_ret', 'bench_ret', 'excess_ret']
    result_peryear = result_peryear.T

    # plotting
    xtick = np.round(np.linspace(0, strategy.shape[0] - 1, 7), 0).astype(int)
    xticklabel = strategy.index[xtick].strftime("%y %b")

    plt.figure(figsize=(9, 4))
    ax1 = plt.axes()
    plt.plot(np.arange(strategy.shape[0]),
             strategy.benchmark,
             'black',
             label='benchmark',
             linewidth=2)
    plt.plot(np.arange(strategy.shape[0]),
             strategy.nav,
             'red',
             label='nav',
             linewidth=2)
    # plt.plot(np.arange(strategy.shape[0]), strategy.nav / strategy.benchmark, 'orange', label='RS', linewidth=2)
    # plt.plot(np.arange(strategy.shape[0]), 1 , 'grey', label='1', linewidth=1)

    plt.plot(np.arange(strategy.shape[0]),
             strategy.zb / 50000 + 1,
             'orange',
             label='zb',
             linewidth=2)
    # plt.plot(np.arange(strategy.shape[0]), strategy.asset, 'blue', label='asset', linewidth=2)

    # plt.plot(np.arange(strategy.shape[0]), strategy.jtf_roll/50000+1 , 'blue', label='jtf_roll', linewidth=2)
    lim = [1] * 120
    plt.plot(lim, "r--")

    plt.legend()

    ax1.set_xticks(xtick)
    ax1.set_xticklabels(xticklabel)
    plt.savefig("/Users/feitongliu/Desktop/数据/jtf_momentum.png", dpi=100)
    plt.show()

    maxloss = min(transactions.pricesell / transactions.pricebuy - 1)
    print('------------------------------')
    print('Sharpe ratio:', round(Sharp, 2))
    print('Annualized return: {}%'.format(round(rety * 100, 2)))
    print('Benchmark annualized return: {}%'.format(round(bench_rety * 100, 2)))
    print('Win rate: {}%'.format(round(VictoryRatio * 100, 2)))
    print('Max drawdown: {}%'.format(round(MDD * 100, 2)))
    # print('Max single-trade loss: {}%'.format(round(-maxloss * 100, 2)))
    print('Average monthly trades: {} (buys and sells combined)'.format(
        round(strategy.flag.abs().sum() / strategy.shape[0] * 20, 2)))

    result = {
        'Sharp': Sharp,
        'RetYearly': rety,
        'WinRate': VictoryRatio,
        'MDD': MDD,
        'maxlossOnce': -maxloss,
        'num': round(strategy.flag.abs().sum() / strategy.shape[0], 1)
    }

    result = pd.DataFrame.from_dict(result, orient='index').T

    return result, result_peryear
Code Example #37
def geoId_cases(df):
    df.groupby('geoId')['cases'].plot(kind='bar')
    plt.show()
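
A hedged usage example with a minimal DataFrame of the shape the function expects (the column names geoId and cases come from the snippet; the values are made up):

import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({
    'geoId': ['DE', 'DE', 'FR', 'FR'],
    'cases': [10, 12, 7, 9],
})
geoId_cases(df)  # one bar series per geoId group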
Code Example #38
File: clus_get_xid.py Project: rit-rsz/rsz
def clus_get_xid(maps,
                 cats,
                 savemap=0,
                 simmap=0,
                 verbose=1,
                 confusionerrors=1):
    err = False
    thresh = 3.0
    mf = 1.0

    mJy2Jy = 1000.0 / mf
    catfile = config.CLUSDATA + 'placeholder'  #this doesn't actually seem to do anything in the xid code,
    # but you need something for this for some reason.

    #Old code not used anymore
    # print('Retrieving data from cats')
    # inra = []
    # indec = []
    # for i in range(len(cats)):
    #     for j in range(len(cats[i]['ra'])):
    #         if cats[i]['ra'][j] not in inra and cats[i]['dec'][j] not in indec:
    #             inra.append(cats[i]['ra'][j])
    #             indec.append(cats[i]['dec'][j])
    # print('Done retrieving data from cats')

    inra = np.array(cats['ra'])
    indec = np.array(cats['dec'])

    ra = inra * u.deg
    dec = indec * u.deg
    c = SkyCoord(ra, dec, unit='deg')
    plt.scatter(ra, dec, c=cats['flux'], alpha=0.5)
    plt.show()

    print(inra)
    #initializing data containers.
    pinds = []
    files = []
    primary_hdus = []
    noise_maps = []
    data_maps = []
    headers = []
    pixsizes = []
    prf_sizes = []
    priors = []
    prfs = []
    for i in range(len(maps)):
        bands = [18, 25, 36]  #units of arcseconds
        fwhm = bands[i] / maps[i]['pixsize']  #converts to arcseconds/pixel
        pixs = maps[i]['pixsize']
        size = pixs * 5
        moc = pymoc.util.catalog.catalog_to_moc(c, size, 15)
        #getting data from the fits files
        files.append(maps[i]['file'])
        hdul = fits.open(files[i])
        headers.append(hdul[1].header)
        primary_hdus.append(hdul[0].header)
        img = hdul[1].data
        data_maps.append(img)
        noise_maps.append(hdul[2].data)
        pixsizes.append(maps[i]['pixsize'])
        prf_sizes.append(get_spire_beam_fwhm(maps[i]['band']))
        pinds.append(np.arange(0, 101, 1) * 1.0 / pixsizes[i])
        # print(maps[i]['file'])
        # print(pixsizes[i])
        #setting up priors
        prior = xidplus.prior(data_maps[i],
                              noise_maps[i],
                              primary_hdus[i],
                              headers[i],
                              moc=moc)
        prior.prior_cat(inra, indec, catfile, moc=moc)
        prior.prior_bkg(-5.0, 5)

        #setting up prfs.
        # This prf doesnt quite look correct
        # In previous set up we needed to rebin to accuratly describe our beam sizes
        prf = Gaussian2DKernel(
            bands[i] / 2.355, x_size=101,
            y_size=101)  #maybe x_size and y_size need to change.
        prf.normalize(mode='peak')
        prfs.append(prf.array)
        # print(prfs)
        # exit()  # debugging leftover; if left active it would abort after the first band
        #appending prf to prior and setting point matrix
        prior.set_prf(prfs[i], pinds[i], pinds[i])  #prfs, xpinds, ypinds
        prior.get_pointing_matrix()
        prior.upper_lim_map()

        #appending prior to priors list.
        priors.append(prior)

    print('fitting %s sources' % (priors[0].nsrc))
    print('using %s %s %s pixels' %
          (priors[0].snpix, priors[1].snpix, priors[2].snpix))

    fit = SPIRE.all_bands(
        priors[0], priors[1], priors[2], iter=1000
    )  #number of iterations should be at least 100 just set lower for testing.
    posterior = xidplus.posterior_stan(fit, [priors[0], priors[1], priors[2]])

    # figs, fig = xidplus.plots.plot_Bayes_pval_map(priors, posterior)
    # print(type(figs)) #figs is list.
    # print(figs) #fig is matplotlib.figure.figure object.
    # print(type(fig))
    # cols = ['PSW', 'PMW', 'PLW']
    # counter = 0
    # for figure in figs:
    #     figure.save('xid_%s.png' %(cols[counter]))
    #     counter += 1

    # plt.imshow(figs)

    spire_cat = cat.create_SPIRE_cat(posterior, priors[0], priors[1],
                                     priors[2])

    # spire_cat.writeto('xid_model_2_%s.fits' % (maps[0]['name']))

    xid_data = spire_cat[1].data
    xid = []

    #in units of mJy for fluxes and degrees for RA/DEC
    xid1 = {
        'band': 'PSW',
        'sra': xid_data.field('RA'),
        'sdec': xid_data.field('DEC'),
        'sflux': xid_data.field('F_SPIRE_250'),
        'serr': xid_data.field(
            'FErr_SPIRE_250_u'
        ),  #there was also FErr_SPIRE_250_l don't know which to use.
        'pflux': xid_data.field('F_SPIRE_250'),
        'perr': xid_data.field('FErr_SPIRE_250_u'),
        'model': None,
        'mf': mf
    }  #idk if perr and pflux is right there may be a conversion needed for pflux.
    #in mikes code it has pflux = output from xid / mJy2Jy.
    xid2 = {
        'band': 'PMW',
        'sra': xid_data.field('RA'),
        'sdec': xid_data.field('DEC'),
        'sflux': xid_data.field('F_SPIRE_350'),
        'serr': xid_data.field('FErr_SPIRE_350_u'),
        'pflux': xid_data.field('F_SPIRE_350'),
        'perr': xid_data.field('FErr_SPIRE_350_u'),
        'model': None,
        'mf': mf
    }

    xid3 = {
        'band': 'PLW',
        'sra': xid_data.field('RA'),
        'sdec': xid_data.field('DEC'),
        'sflux': xid_data.field('F_SPIRE_500'),
        'serr': xid_data.field('FErr_SPIRE_500_u'),
        'pflux': xid_data.field('F_SPIRE_500'),
        'perr': xid_data.field('FErr_SPIRE_500_u'),
        'model': None,
        'mf': mf
    }

    #there was another term in the dictionary sflux, pflux and sflux looked like maybe the same thing, but I'm not sure.
    #I left it out so if there are issues with that then it is because that is gone.
    xid.append(xid1)
    xid.append(xid2)
    xid.append(xid3)

    # models = create_model(maps, xid)

    # for i in range(len(xid)):
    #     xid[i]['model'] = models[i]

    # # only look at data with a flux lower than 0.0
    # for i in range(len(xid)):
    #     whpl = []
    #     for j in range(xid[i]['model'].shape[0]):
    #         for k in range(xid[i]['model'].shape[1]):
    #         if xid[i]['pflux'][j] >= 0.0:
    #             whpl.append(j)
    #     whpl = np.array(whpl)
    #
    #     xid[i]['sra'] = xid[i]['sra'][whpl]
    #     xid[i]['sdec'] = xid[i]['sdec'][whpl]
    #     xid[i]['x'] = xid[i]['x'][whpl]
    #     xid[i]['y'] = xid[i]['y'][whpl]
    #     xid[i]['sflux'] = xid[i]['sflux'][whpl]
    #     xid[i]['serr'] = xid[i]['serr'][whpl]

    for i in range(len(xid)):
        ra = xid[i]['sra'] * u.deg
        dec = xid[i]['sdec'] * u.deg
        c = SkyCoord(ra, dec)
        #initializing w class.
        hdul = fits.open(maps[i]['file'])
        w = wcs(hdul[1].header)
        #converting ra/dec to pixel coords.
        px, py = skycoord_to_pixel(c, w)
        xid[i]['x'] = px
        xid[i]['y'] = py
        xid[i]['sra'] = xid[i]['sra'].tolist()
        xid[i]['sdec'] = xid[i]['sdec'].tolist()
        xid[i]['sflux'] = xid[i]['sflux'].tolist()
        xid[i]['serr'] = xid[i]['serr'].tolist()
        xid[i]['pflux'] = xid[i]['pflux'].tolist()
        xid[i]['perr'] = xid[i]['perr'].tolist()
        xid[i]['x'] = xid[i]['x'].tolist()
        xid[i]['y'] = xid[i]['y'].tolist()

        #saving to json file for further analysis.
        with open('xid_a0370_take_9_%s.json' % (xid[i]['band']),
                  'w') as f:  #code for saving output to a file.
            json.dump(xid[i], f)

    #model = image_model(x,y, sflux, maps[i]['astr']['NAXIS'][0], maps[i]['astr']['NAXIS'][1],
    #maps[i]['psf'])
    #need to finish converting model over to python.

    #
    # if savemap:
    #     outfile = config.CLUSSBOX + 'clus_get_xid_model_' + maps[i]['band'] + '.fit'
    #     writefits(outfile, data=model, header_dict=maps[i]['shead'])

    return xid, err
Code Example #39
File: main.py Project: georgemarshall180/Dev
def main():
    bank = 100
    wagers = 100
    wagerunit = 10
    wins = 0
    loses = 0
    push = 0
    x = 0
    result = dict()
    even = 0
    odd = 0
    ## end_result = stat_collector('test', 0)

    while x <= wagers:

        # Game 1 Blackjack
        result = Games.blackJack()
        if result == 'win':
            wins = wins + 1
        elif result == 'lose':
            loses = loses + 1
        elif result == 'push':
            push = push + 1
        x = x + 1
        print("run:" + str(x))

    print("wins:" + str(wins))
    print("Loses:" + str(loses))
    print("Push:" + str(push))
    print("chance of winning:", wins / wagers)
    print("chance of loses:", loses / wagers)
    print("chance of push:", push / wagers)
    '''
    ## Game 2 Roulette
    ##
    mynumber = Games.roulette()
    result = Games.roulette()
    print('the winner is:' + repr(result))
    if result == mynumber:
        end_result.append(win, 1)
    elif result != mynumber:
        end_result.append(lose, 1)
    #if (result[0] %2) == 0:
        #even
        even =+ 1

    #elif result:
        #odd
        odd =+ 1


    ##    push = push + 1
    ## x = x + 1
    # print("run:" + str(x))
    '''

    ## game3 = Games.craps()

    ## game4 = Games.oddsgame(.5)

    ## while (wagers > 0) and (bank >0):
    ##    game

    plt.plot(result)  # NOTE: result holds only the last outcome string here; a list of per-run results was presumably intended
    plt.ylabel('')
    plt.xlabel(' ')
    plt.show()
Code Example #40
def getFiles(filePath):
    files = os.listdir(filePath)
    #config
    l1isz = []
    l1dsz = []
    l2sz = []
    clk = []
    blksz = []
    #result
    instNum = []
    tick = []
    l1iMissRate = []
    l1dMissRate = []
    l2MissRate = []
    time = []
    with open(os.path.join(rootPath, 'write.csv'), 'w') as w:

        for fi in files:
            fi_d = os.path.join(filePath, fi)
            if os.path.isdir(fi_d):
                if re.search(r'(.*)l1i(.*)', fi_d, re.I):
                    print(fi)
                    #every single config
                    with open(os.path.join(fi_d, 'para.txt'), 'r') as f:
                        content = f.read()
                        pattern = re.compile(r'([^-=]*=[^-\s\D]*)')
                        params = pattern.findall(content)
                        # create dict
                        dict1 = {}
                        for para in params:
                            tmp = para.split(r'=')
                            dict1[tmp[0]] = tmp[1]
                        print(dict1)
                        #add to list
                        l1isz.append(int(dict1.get('l1isz') or '64'))
                    print(l1isz)
                    with open(os.path.join(fi_d, 'stats.txt'), 'r') as f:
                        dict2 = {}
                        content = f.read()
                        tmp = re.compile(r'sim_insts[\s]*([^\s]*)').search(
                            content)
                        if tmp:
                            # dict2['sim_insts']=tmp.group(1)
                            instNum.append(int(tmp.group(1)))

                        tmp = re.compile(r'final_tick[\s]*([^\s]*)').search(
                            content)
                        if tmp:
                            tick.append(int(tmp.group(1)))

                        tmp = re.compile(
                            r'system.cpu.icache.overall_miss_rate::total[\s]*([^\s]*)'
                        ).search(content)
                        if tmp:
                            l1iMissRate.append(float(tmp.group(1)))

                        tmp = re.compile(
                            r'system.cpu.dcache.overall_miss_rate::total[\s]*([^\s]*)'
                        ).search(content)
                        if tmp:
                            l1dMissRate.append(float(tmp.group(1)))

                        tmp = re.compile(
                            r'system.l2cache.overall_miss_rate::total[\s]*([^\s]*)'
                        ).search(content)
                        if tmp:
                            l2MissRate.append(float(tmp.group(1)))

                else:
                    #getFiles(fi_d)
                    pass

            else:
                # with open(os.path.join(filePath,fi),'r') as f:
                # 	content=f.read()
                # 	search=re.finditer( r'--(\w*)=(\D?)(\d\w+)',content,re.I)
                # 	for match in search:
                # 		print "in file ",os.path.join(filePath,fi)," ",match.group(1)," *=* ",match.group(3)
                pass
    drawDict = {}
    for i in range(0, len(l1isz)):
        drawDict[l1isz[i]] = l1iMissRate[i]

    toDraw = [(key, drawDict[key]) for key in sorted(drawDict.keys())]
    x = [key for key, _ in toDraw]
    y = [value for _, value in toDraw]
    print(x)
    print(y)

    plt.plot(x, y)
    plt.show()
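# For reference, a minimal sketch of what the para.txt parsing above expects:
# key=value pairs with purely numeric values (the [^-\s\D]* class keeps digits
# only). The sample string here is hypothetical.
import re

content = "--l1isz=64 --l1dsz=64 --l2sz=256"
params = re.compile(r'([^-=]*=[^-\s\D]*)').findall(content)
print(params)  # ['l1isz=64', 'l1dsz=64', 'l2sz=256']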
コード例 #41
0
import matplotlib.pyplot as plt
from datetime import datetime, timedelta


def plot_month(data, start_day):
    start = datetime.strptime(start_day, '%Y-%m-%d')
    end = start + timedelta(days=30)  # a 30-day window from the start date
    plt.plot(data[start:end])
    plt.show()
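# A minimal usage sketch for plot_month, assuming `data` is a pandas Series
# with a DatetimeIndex so that data[start:end] does label-based date slicing.
# The dates and values are purely illustrative.
import numpy as np
import pandas as pd

idx = pd.date_range('2020-01-01', '2020-03-31', freq='D')
data = pd.Series(np.random.rand(len(idx)), index=idx)
plot_month(data, '2020-02-01')  # plots the 30 days starting 2020-02-01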
コード例 #42
0
def plot_series(data, index):
    series = data.loc[data['series'] == index].drop('series', axis=1)
    plt.plot(series)
    plt.show()
コード例 #43
0
def sh3(data):
    plt.imshow(data)
    plt.show()
コード例 #44
0
def main(path, start, end):

    cluster_id = []
    cluster_size = []
    cluster_ratio = []
    species_num = []
    species_no = []

    with open(path) as file_to_read:
        while True:
            lines = file_to_read.readline()  # read the file one line at a time
            if not lines:
                break
            # split the line on ';' (use ',' or whitespace for other formats)
            [cid, size, ratio, num,
             no] = [row for row in lines.strip("\n").split(';')]
            cluster_id.append(cid)  # store the newly read fields
            cluster_size.append(size)
            cluster_ratio.append(ratio)
            species_num.append(num)
            species_no.append(no)

    new_cluster_size = [int(i) for i in cluster_size]
    new_species_num = [int(i) for i in species_num]

    species = []
    species_dict = {}
    for i in range(len(species_no)):
        spec_count = {}
        everySpec = species_no[i].split('|')
        for j in range(len(everySpec)):
            Spec = everySpec[j].split('=')
            spec_count.setdefault(Spec[0], Spec[1])
            if Spec[0] not in species_dict:
                species_dict[Spec[0]] = 1
            else:
                species_dict[Spec[0]] = species_dict[Spec[0]] + 1
        species.append(spec_count)

    with open("./Data/species_No_02", "a+", encoding='utf-8') as out_file:
        for key in species_dict.keys():
            out_file.write(key + '\n')

    subCluster_id = []
    subCluster_size = []
    speciesNuminCluster = []
    speciesRatio = []
    for i in range(start - 1, end):
        subCluster_id.append(cluster_id[i])
        subCluster_size.append(new_cluster_size[i])
        speciesNuminCluster.append(new_species_num[i])
        subSpeciesRatio = round(new_species_num[i] / new_cluster_size[i], 5)
        speciesRatio.append(subSpeciesRatio)

    # draw the grouped bar chart
    width = 0.3
    # number of bars along the x axis
    ind = np.arange(len(subCluster_id))
    fig = plt.figure(1, figsize=(16, 10), dpi=80)
    ax1 = fig.add_subplot(121)
    bar_cluster_size = ax1.bar(ind,
                               subCluster_size,
                               width,
                               color='forestgreen')
    bar_species_no = ax1.bar(ind + width,
                             speciesNuminCluster,
                             width,
                             color='gold')
    ax1.set_xticks(ind + width / 2)
    ax1.set_xticklabels(subCluster_id, rotation=90)
    ax1.set_xlabel('cluster_id')
    ax1.set_ylabel('cluster_size/species_num')
    ax1.set_title("Species Information in Clustering File")
    add_labels(bar_cluster_size)  # helper assumed defined elsewhere; see sketch below
    add_labels(bar_species_no)
    plt.grid(True)

    # pie chart: all-species ratio of the first selected cluster
    ax2 = fig.add_subplot(222)
    labels = 'species_ratio:' + '%s' % speciesRatio[0], 'other'
    sizes = speciesRatio[0], (1 - speciesRatio[0])
    colors = 'lightgreen', 'gold'
    explode = 0, 0
    ax2.pie(sizes,
            explode=explode,
            labels=labels,
            colors=colors,
            autopct='%1.1f%%',
            shadow=True,
            startangle=50)
    ax2.axis('equal')
    ax2.set_title("All Species Ratio of " + subCluster_id[0])

    # pie chart: ratio of each species within the first selected cluster
    labels2 = []
    size2 = []
    expl2 = []
    ax3 = fig.add_subplot(224)
    for key, value in species[start - 1].items():
        labels2.append(key)
        size2.append(int(value))
        expl2.append(int(0))
    ax3.pie(size2,
            explode=expl2,
            labels=labels2,
            autopct='%1.1f%%',
            startangle=50)
    ax3.axis('equal')
    ax3.set_title("Each Species Ratio of " + subCluster_id[0])

    plt.show()
    plt.close()
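# add_labels is called above but not defined in this snippet. A plausible
# sketch of such a helper, assumed to annotate each bar with its height
# (pyplot imported as plt, as elsewhere in this example):
def add_labels(bars):
    for bar in bars:
        height = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2.0, height, str(height),
                 ha='center', va='bottom')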
コード例 #45
0
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import (accuracy_score, mean_absolute_error,
                             mean_squared_error, r2_score)


def process(path):
    msev = []
    maev = []
    rsqv = []
    rmsev = []
    acyv = []

    df = pd.read_csv(path, encoding="latin-1")

    x1 = np.array(df['Lyrics'].values.astype('U'))
    y1 = np.array(df['MoodValue'])
    print(x1)
    print(y1)
    X_train, X_test, y_train, y_test = train_test_split(x1, y1, test_size=0.20)

    count_vectorizer = CountVectorizer(stop_words='english')
    count_train = count_vectorizer.fit_transform(
        X_train
    )  # Learn the vocabulary dictionary and return term-document matrix.
    count_test = count_vectorizer.transform(X_test)

    tfidf_vectorizer = TfidfVectorizer(
        stop_words='english', max_df=0.7
    )  # This removes words which appear in more than 70% of the articles
    tfidf_train = tfidf_vectorizer.fit_transform(X_train)
    tfidf_test = tfidf_vectorizer.transform(X_test)

    model2 = DecisionTreeClassifier()
    model2.fit(count_train, y_train)
    y_pred = model2.predict(count_test)
    print("predicted")
    print(y_pred)
    print("test")
    print(y_test)

    result2 = open("results/resultCOUNTDT.csv", "w")
    result2.write("ID,Predicted Value" + "\n")
    for j in range(len(y_pred)):
        result2.write(str(j + 1) + "," + str(y_pred[j]) + "\n")
    result2.close()

    mse = mean_squared_error(y_test, y_pred)
    mae = mean_absolute_error(y_test, y_pred)
    r2 = abs(r2_score(y_test, y_pred))

    print("---------------------------------------------------------")
    print("MSE VALUE FOR DecisionTree COUNT IS %f " % mse)
    print("MAE VALUE FOR DecisionTree COUNT IS %f " % mae)
    print("R-SQUARED VALUE FOR DecisionTree COUNT IS %f " % r2)
    rms = np.sqrt(mean_squared_error(y_test, y_pred))
    print("RMSE VALUE FOR DecisionTree COUNT IS %f " % rms)
    ac = accuracy_score(y_test, y_pred)
    print("ACCURACY VALUE DecisionTree COUNT IS %f" % ac)
    print("---------------------------------------------------------")

    msev.append(mse)
    maev.append(mae)
    rsqv.append(r2)
    rmsev.append(rms)
    acyv.append(ac * 100)

    result2 = open('results/COUNTDTMetrics.csv', 'w')
    result2.write("Parameter,Value" + "\n")
    result2.write("MSE" + "," + str(mse) + "\n")
    result2.write("MAE" + "," + str(mae) + "\n")
    result2.write("R-SQUARED" + "," + str(r2) + "\n")
    result2.write("RMSE" + "," + str(rms) + "\n")
    result2.write("ACCURACY" + "," + str(ac) + "\n")
    result2.close()

    df = pd.read_csv('results/COUNTDTMetrics.csv')
    acc = df["Value"]
    alc = df["Parameter"]
    colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#8c564b"]
    explode = (0.1, 0, 0, 0, 0)

    fig = plt.figure()
    plt.bar(alc, acc, color=colors)
    plt.xlabel('Parameter')
    plt.ylabel('Value')
    plt.title(' COUNT DecisionTree Metrics Value')
    fig.savefig('results/COUNTDTMetricsValue.png')
    plt.pause(5)
    plt.show(block=False)
    plt.close()

    model2 = DecisionTreeClassifier()
    model2.fit(tfidf_train, y_train)
    y_pred = model2.predict(tfidf_test)
    print("predicted")
    print(y_pred)
    print("test")
    print(y_test)

    result2 = open("results/resultTFIDFDT.csv", "w")
    result2.write("ID,Predicted Value" + "\n")
    for j in range(len(y_pred)):
        result2.write(str(j + 1) + "," + str(y_pred[j]) + "\n")
    result2.close()

    mse = mean_squared_error(y_test, y_pred)
    mae = mean_absolute_error(y_test, y_pred)
    r2 = abs(r2_score(y_test, y_pred))

    print("---------------------------------------------------------")
    print("MSE VALUE FOR DecisionTree TFIDF IS %f " % mse)
    print("MAE VALUE FOR DecisionTree TFIDF IS %f " % mae)
    print("R-SQUARED VALUE FOR DecisionTree TFIDF IS %f " % r2)
    rms = np.sqrt(mean_squared_error(y_test, y_pred))
    print("RMSE VALUE FOR DecisionTree TFIDF IS %f " % rms)
    ac = accuracy_score(y_test, y_pred)
    print("ACCURACY VALUE DecisionTree TFIDF IS %f" % ac)
    print("---------------------------------------------------------")

    msev.append(mse)
    maev.append(mae)
    rsqv.append(r2)
    rmsev.append(rms)
    acyv.append(ac * 100)

    result2 = open('results/TFIDFDTMetrics.csv', 'w')
    result2.write("Parameter,Value" + "\n")
    result2.write("MSE" + "," + str(mse) + "\n")
    result2.write("MAE" + "," + str(mae) + "\n")
    result2.write("R-SQUARED" + "," + str(r2) + "\n")
    result2.write("RMSE" + "," + str(rms) + "\n")
    result2.write("ACCURACY" + "," + str(ac) + "\n")
    result2.close()

    df = pd.read_csv('results/TFIDFDTMetrics.csv')
    acc = df["Value"]
    alc = df["Parameter"]
    colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#8c564b"]
    explode = (0.1, 0, 0, 0, 0)

    fig = plt.figure()
    plt.bar(alc, acc, color=colors)
    plt.xlabel('Parameter')
    plt.ylabel('Value')
    plt.title(' TFIDF DecisionTree Metrics Value')
    fig.savefig('results/TFIDFCOUNTDTMetricsValue.png')
    plt.pause(5)
    plt.show(block=False)
    plt.close()

    al = ['COUNT', 'TFIDF']

    result2 = open('results/DTMSE.csv', 'w')
    result2.write("Vectorization,MSE" + "\n")
    for i in range(0, len(msev)):
        result2.write(al[i] + "," + str(msev[i]) + "\n")
    result2.close()

    colors = ["#1f77b4", "#ff7f0e", "#2ca02c"]
    explode = (0.1, 0, 0, 0, 0)

    #Barplot for the dependent variable
    fig = plt.figure(0)
    df = pd.read_csv('results/DTMSE.csv')
    acc = df["MSE"]
    alc = df["Vectorization"]
    plt.bar(alc, acc, color=colors)
    plt.xlabel('Vectorization')
    plt.ylabel('MSE')
    plt.title("DecisionTree MSE Value")
    fig.savefig('results/DTMSE.png')
    plt.pause(5)
    plt.show(block=False)
    plt.close()

    result2 = open('results/DTMAE.csv', 'w')
    result2.write("Vectorization,MAE" + "\n")
    for i in range(0, len(maev)):
        result2.write(al[i] + "," + str(maev[i]) + "\n")
    result2.close()

    fig = plt.figure(0)
    df = pd.read_csv('results/DTMAE.csv')
    acc = df["MAE"]
    alc = df["Vectorization"]
    plt.bar(alc, acc, color=colors)
    plt.xlabel('Vectorization')
    plt.ylabel('MAE')
    plt.title('DecisionTree MAE Value')
    fig.savefig('results/DTMAE.png')
    plt.pause(5)
    plt.show(block=False)
    plt.close()

    result2 = open('results/DTR-SQUARED.csv', 'w')
    result2.write("Vectorization,R-SQUARED" + "\n")
    for i in range(0, len(rsqv)):
        result2.write(al[i] + "," + str(rsqv[i]) + "\n")
    result2.close()

    fig = plt.figure(0)
    df = pd.read_csv('results/DTR-SQUARED.csv')
    acc = df["R-SQUARED"]
    alc = df["Vectorization"]

    plt.bar(alc, acc, color=colors)
    plt.xlabel('Vectorization')
    plt.ylabel('R-SQUARED')
    plt.title('DecisionTree R-SQUARED Value')
    fig.savefig('results/DTR-SQUARED.png')
    plt.pause(5)
    plt.show(block=False)
    plt.close()

    result2 = open('results/DTRMSE.csv', 'w')
    result2.write("Vectorization,RMSE" + "\n")
    for i in range(0, len(rmsev)):
        result2.write(al[i] + "," + str(rmsev[i]) + "\n")
    result2.close()

    fig = plt.figure(0)
    df = pd.read_csv('results/DTRMSE.csv')
    acc = df["RMSE"]
    alc = df["Vectorization"]
    plt.bar(alc, acc, color=colors)
    plt.xlabel('Vectorization')
    plt.ylabel('RMSE')
    plt.title('DecisionTree RMSE Value')
    fig.savefig('results/DTRMSE.png')
    plt.pause(5)
    plt.show(block=False)
    plt.close()

    result2 = open('results/DTAccuracy.csv', 'w')
    result2.write("Vectorization,Accuracy" + "\n")
    for i in range(0, len(acyv)):
        result2.write(al[i] + "," + str(acyv[i]) + "\n")
    result2.close()

    fig = plt.figure(0)
    df = pd.read_csv('results/DTAccuracy.csv')
    acc = df["Accuracy"]
    alc = df["Vectorization"]
    plt.bar(alc, acc, color=colors)
    plt.xlabel('Vectorization')
    plt.ylabel('Accuracy')
    plt.title('DecisionTree Accuracy Value')
    fig.savefig('results/DTAccuracy.png')
    plt.pause(5)
    plt.show(block=False)
    plt.close()
コード例 #46
0
ファイル: pandasCore.py プロジェクト: dsmiff/pandasUtils
def plotDF(df):
    df.plot(kind='line', logx=True)
    plt.show()
コード例 #47
0
def cT_deaths(df):
    df.groupby('countriesAndTerritories')['deaths'].plot(kind='bar')
    plt.show()
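# A small usage sketch with a made-up frame in the shape cT_deaths expects
# (columns countriesAndTerritories and deaths); note that groupby(...).plot
# draws each country's series onto the same axes, so bars can overlap.
import pandas as pd

df = pd.DataFrame({
    'countriesAndTerritories': ['France', 'France', 'Italy', 'Italy'],
    'deaths': [12, 15, 20, 18],
})
cT_deaths(df)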
コード例 #48
0
t_init = time.time()
A_list, v_list, sigma_list, logL_list, n_iter, total_time, full_seq_probs = \
    cpEM_BW(fluo_states, A_init=A_init, v_init=v, noise_init=sigma * 2, estimate_noise=1, pi0=pi, w=w, max_stack=max_stack, keep_probs=1, verbose=1, max_iter=1000, eps=10e-6)

# First set up the figure, the axis, and the plot element we want to animate
# create the figure
fig = plt.figure()
ax = fig.add_subplot(111)


def init():
    im = ax.imshow(np.array(full_seq_probs[0]), interpolation='none')
    return im,


plt.show(block=False)


def animate(i):
    time.sleep(1)
    im = ax.imshow(np.array(full_seq_probs[i]), interpolation='none')
    return im,


anim = animation.FuncAnimation(fig,
                               animate,
                               init_func=init,
                               frames=len(full_seq_probs),
                               interval=1,
                               blit=True)
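# A caveat on the pattern above: FuncAnimation only runs while the `anim`
# reference is alive and the GUI event loop is spinning, and re-calling
# ax.imshow() every frame allocates a new image each time. A self-contained
# sketch of the same animation that updates one image in place, with random
# frames standing in for full_seq_probs:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

frames = [np.random.rand(5, 5) for _ in range(20)]  # stand-in data

fig, ax = plt.subplots()
im = ax.imshow(frames[0], interpolation='none')


def update(i):
    im.set_data(frames[i])  # update the existing image instead of re-drawing
    return im,


anim = animation.FuncAnimation(fig, update, frames=len(frames),
                               interval=100, blit=True)
plt.show()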
コード例 #49
0
 def plot_price(self, x, y):
     plt.plot(x, y)
     plt.show()
コード例 #50
0
ファイル: GUI_ASR.py プロジェクト: rubenchos6/ASR
def runASR(audio, rate, muestras, distance_columns, distance):
    # Plot audio

    # audioName is assumed to be defined globally; reading it here shadows
    # the audio/rate arguments passed in.
    rate, audio = read(audioName)
    try:
        audio = audio[:, 1]  # keep one channel if the recording is stereo
    except IndexError:
        pass  # already mono

    plt.plot(audio)
    print(len(audio))

    ############################################################################
    # Plot Spectrogram and characteristics
    plt.figure()
    audioFiltrado = lfilter([1], [1, 0.63], audio)
    # muestras= 1024   # 21 ms
    s, w, t, im = plt.specgram(audioFiltrado,
                               Fs=rate,
                               NFFT=muestras,
                               window=scipy.signal.blackman(muestras),
                               noverlap=100)
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
    plt.show()

    print("Vector que van a poblar nuestras frecuencias es de tamaño:")
    print(len(s))
    print("Número de ventanas:")
    print(len(s[0]))
    print("Tenemos la siguiente cantidad de frecuencia")
    print(len(w))
    # print(s)
    #############################################################################
    # Build a matrix with the formants of the whole audio extracted from the
    # spectrogram
    matrixFormantsSpectro = []
    for i in range(len(s[0])):
        try:
            valor = findFormantes(s[:, i])
            if (len(valor) > 2):
                matrixFormantsSpectro.append([i, valor])
        except Exception:
            pass  # skip windows where no formants were found
    #############################################################################
    # Call the function that computes the closest-matching word

    array1 = aproxWord(matrixFormantsSpectro, statisticDBVocalsA,
                       distance_columns, distance)
    array2 = aproxWord(matrixFormantsSpectro, statisticDBVocals,
                       distance_columns, distance)
    array3 = aproxWord(matrixFormantsSpectro, statisticDB, distance_columns,
                       distance)
    array4 = aproxWord(matrixFormantsSpectro, statisticDBA, distance_columns,
                       distance)

    for l1 in array1:
        list1.insert(END, l1)
    for l2 in array2:
        list2.insert(END, l2)
    for l3 in array3:
        list3.insert(END, l3)
    for l4 in array4:
        list4.insert(END, l4)
コード例 #51
0
ファイル: Assignment.py プロジェクト: xiaohongniua/DL
def model(X_train,
          Y_train,
          X_test,
          Y_test,
          learning_rate=0.009,
          num_epochs=500,
          minibatch_size=64,
          print_cost=True):
    """
  Implements a three-layer ConvNet in Tensorflow:
  CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

  Arguments:
  X_train -- training set, of shape (None, 64, 64, 3)
  Y_train -- test set, of shape (None, n_y = 6)
  X_test -- training set, of shape (None, 64, 64, 3)
  Y_test -- test set, of shape (None, n_y = 6)
  learning_rate -- learning rate of the optimization
  num_epochs -- number of epochs of the optimization loop
  minibatch_size -- size of a minibatch
  print_cost -- True to print the cost every 100 epochs

  Returns:
  train_accuracy -- real number, accuracy on the train set (X_train)
  test_accuracy -- real number, testing accuracy on the test set (X_test)
  parameters -- parameters learnt by the model. They can then be used to predict.
  """

    ops.reset_default_graph(
    )  # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)  # to keep results consistent (tensorflow seed)
    seed = 3  # to keep results consistent (numpy seed)
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []  # To keep track of the cost

    # Create Placeholders of the correct shape
    # START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    ### END CODE HERE ###

    # Initialize parameters
    # START CODE HERE ### (1 line)
    parameters = initialize_parameters()
    ### END CODE HERE ###

    # Forward propagation: Build the forward propagation in the tensorflow graph
    # START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###

    # Cost function: Add cost function to tensorflow graph
    # START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)
    ### END CODE HERE ###

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
    # START CODE HERE ### (1 line)
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cost)
    ### END CODE HERE ###

    # Initialize all the variables globally
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            minibatch_cost = 0.
            num_minibatches = int(
                m / minibatch_size
            )  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size,
                                              seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the optimizer and the cost; the feed_dict should contain a minibatch for (X, Y).
                # START CODE HERE ### (1 line)
                _, cost_ = sess.run([optimizer, cost],
                                    feed_dict={
                                        X: minibatch_X,
                                        Y: minibatch_Y
                                    })
                ### END CODE HERE ###

                minibatch_cost += cost_ / num_minibatches

            # Print the cost every 5 epochs and record it every epoch
            if print_cost and epoch % 5 == 0:
                print("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost and epoch % 1 == 0:
                costs.append(minibatch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('epochs')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # Calculate the correct predictions
        predict_op = tf.argmax(Z3, 1)
        true_op = tf.argmax(Y, 1)
        correct_prediction = tf.equal(predict_op, true_op)

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(accuracy)
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)

        return train_accuracy, test_accuracy, parameters
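# A hedged smoke-test sketch for model(), assuming the assignment helpers it
# calls (create_placeholders, initialize_parameters, forward_propagation,
# compute_cost, random_mini_batches) are defined and TensorFlow 1.x is in use.
# The random arrays below merely mimic the SIGNS dataset shapes.
import numpy as np

rng = np.random.RandomState(0)
X_train = rng.rand(64, 64, 64, 3).astype('float32')
Y_train = np.eye(6)[rng.randint(0, 6, 64)]
X_test = rng.rand(16, 64, 64, 3).astype('float32')
Y_test = np.eye(6)[rng.randint(0, 6, 16)]

_, _, parameters = model(X_train, Y_train, X_test, Y_test,
                         num_epochs=5, minibatch_size=16)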
コード例 #52
0
ファイル: model.py プロジェクト: stobasa/datasist
def compare_model(models_list=None,
                  x_train=None,
                  y_train=None,
                  scoring_metric=None,
                  scoring_cv=3,
                  silenced=True):
    """
    Train multiple user-defined models and display a report based on the chosen metric. Enables the user to pick the best base model for a problem.

    Parameters
    ----------------
        models_list: list

            a list of models to be trained

        x_train: Array, DataFrame, Series

            The feature set (x) to use in training an estimator to predict the outcome (y).

        y_train: Series, 1-d array, list

            The ground truth value for the train dataset

        scoring_metric: str

            Metric to use in scoring the model

        scoring_cv: int

            default value is 3

    Returns
    ---------------
    a tuple of fitted_model and the model evaluation scores
    """
    # if not _same_model:
    #     raise ValueError("model_list: models must be of the same class. Cannot use a classifier and a regressor.")

    if models_list is None or len(models_list) < 1:
        raise ValueError("model_list: model_list can't be 'None' or empty")

    if x_train is None:
        raise ValueError("x_train: features can't be 'None' or empty")

    if y_train is None:
        raise ValueError("y_train: features can't be 'None' or empty")

    fitted_model = []
    model_scores = []
    model_names = []

    for i, model in enumerate(models_list):
        if silenced is not True:
            print(f"Fitting {type(model).__name__} ... \n")
        model.fit(x_train, y_train)
        # append fitted model into list
        fitted_model.append(model)
        model_score = np.mean(
            cross_val_score(model,
                            x_train,
                            y_train,
                            scoring=scoring_metric,
                            cv=scoring_cv))
        model_scores.append(model_score)
        model_names.append(type(fitted_model[i]).__name__)

    sns.pointplot(y=model_scores, x=model_names)
    plt.xticks(rotation=90)
    plt.title("Model comparison plot")
    plt.show()

    return fitted_model, model_scores
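# A usage sketch for compare_model, assuming scikit-learn estimators are
# passed in and the module-level imports (numpy, seaborn, pyplot,
# cross_val_score) are present as in the datasist source. The iris data is
# purely illustrative.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
fitted, scores = compare_model(
    models_list=[LogisticRegression(max_iter=500), DecisionTreeClassifier()],
    x_train=X,
    y_train=y,
    scoring_metric='accuracy',
    scoring_cv=5,
    silenced=False,
)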
コード例 #53
0
 def plot_causal_model(self):
     nx.draw(self._causal_model, with_labels=True)
     plt.show()
コード例 #54
0
ANDHRA_PRADESH  # notebook cell: displays the ANDHRA_PRADESH dataframe

# In[ ]:

plt.figure(figsize=(6, 4))
ax = sns.barplot(x="count", y="Quality Parameter", data=ANDHRA_PRADESH)
ax.set(xlabel='Count')
sns.despine(left=True, bottom=True)
plt.title("Water Quality Parameter In Andhra Pradesh")

# In[ ]:

plt.figure(figsize=(6, 4))
ax = sns.barplot(x="count", y="Quality Parameter", data=KERALA)
ax.set(xlabel='Count')
sns.despine(left=True, bottom=True)
plt.title("Water Quality Parameter In Kerala")

# **Total Water Quality Parameter in INDIA**

# In[ ]:

x = State_Quality_Count.groupby('State Name')
plt.rcParams['figure.figsize'] = (9.5, 6.0)
genre_count = sns.barplot(y='Quality Parameter',
                          x='count',
                          data=State_Quality_Count,
                          palette="Blues",
                          ci=None)
plt.show()