Example #1
def plot_fits(direction_rates,fit_curve,title):
    """
    This function takes the x-values and the y-values  in units of spikes/s 
    (found in the two columns of direction_rates and fit_curve) and plots the 
    actual values with circles, and the curves as lines in both linear and 
    polar plots.
    """
    curve_xs = np.arange(direction_rates[0,0], direction_rates[-1,0])
    fit_ys2 = normal_fit(curve_xs,fit_curve[0],fit_curve[1],fit_curve[2])
    
    
    plt.subplot(2,2,3)
    plt.plot(direction_rates[:,0],direction_rates[:,1],'o')  # hold=True was removed from matplotlib; overplotting is now the default
    plt.plot(curve_xs,fit_ys2,'-')
    plt.xlabel('Direction of Motions (Degrees)')
    plt.ylabel('Firing Rates (Spikes/sec)')
    plt.title(title)
    plt.axis([0, 360, 0, 40])
    plt.xticks(direction_rates[:,0])
    
    fit_ys = normal_fit(direction_rates[:,0],fit_curve[0],fit_curve[1],fit_curve[2])
    plt.subplot(2,2,4,polar=True)
    spikecount = np.append(direction_rates[:,1],direction_rates[0,1])
    plt.polar(np.arange(0,361,45)*np.pi/180,spikecount,'o',label='Firing Rate (spike/s)')
    spikecount_y = np.append(fit_ys,fit_ys[0])
    plt.plot(np.arange(0,361,45)*np.pi/180,spikecount_y,'-')
    plt.legend(loc=8)
    plt.title(title)
    
    fit_ys2 = np.transpose(np.vstack((curve_xs,fit_ys2)))    
    
    return(fit_ys2)
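A minimal usage sketch; the normal_fit Gaussian helper below is an assumption, since the excerpt calls it but never defines it:

import numpy as np
import matplotlib.pyplot as plt

def normal_fit(x, mu, sigma, amp):
    # hypothetical Gaussian helper matching the three fit parameters used above
    return amp * np.exp(-(np.asarray(x, dtype=float) - mu)**2 / (2.0 * sigma**2))

directions = np.arange(0, 360, 45)          # 0, 45, ..., 315 degrees
rates = normal_fit(directions, 180.0, 60.0, 30.0)
direction_rates = np.column_stack((directions, rates))
plt.figure()
plot_fits(direction_rates, (180.0, 60.0, 30.0), 'Synthetic direction tuning')
plt.show()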
Example #2
    def bar(self, key_word_sep = " ", title=None, **kwargs):
        """Generates a pylab bar plot from the result set.

        ``matplotlib`` must be installed, and in an
        IPython Notebook, inlining must be on::

            %matplotlib inline

        The last quantitative column is taken as the Y values;
        all other columns are combined to label the X axis.

        Parameters
        ----------
        title: Plot title, defaults to names of Y value columns
        key_word_sep: string used to separate column values
                      from each other in labels

        Any additional keyword arguments will be passed
        through to ``matplotlib.pylab.bar``.
        """
        import matplotlib.pylab as plt
        self.guess_pie_columns(xlabel_sep=key_word_sep)
        plot = plt.bar(range(len(self.ys[0])), self.ys[0], **kwargs)
        if self.xlabels:
            plt.xticks(range(len(self.xlabels)), self.xlabels,
                       rotation=45)
        plt.xlabel(self.xlabel)
        plt.ylabel(self.ys[0].name)
        return plot
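This method lives on an ipython-sql result set, so a hedged notebook sketch might look like this (table and data are made up):

%load_ext sql
%sql sqlite://
%sql CREATE TABLE t (name TEXT, n INT)
%sql INSERT INTO t VALUES ('a', 1), ('b', 3)
result = %sql SELECT name, n FROM t
result.bar()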
Example #3
def plot_svc(X, y, mysvc, bounds=None, grid=50):
    if bounds is None:
        xmin = np.min(X[:, 0], 0)
        xmax = np.max(X[:, 0], 0)
        ymin = np.min(X[:, 1], 0)
        ymax = np.max(X[:, 1], 0)
    else:
        xmin, ymin = bounds[0], bounds[0]
        xmax, ymax = bounds[1], bounds[1]
    aspect_ratio = (xmax - xmin) / (ymax - ymin)
    xgrid, ygrid = np.meshgrid(np.linspace(xmin, xmax, grid),
                              np.linspace(ymin, ymax, grid))
    plt.gca().set_aspect(aspect_ratio)  # gca() no longer accepts Axes keyword arguments
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.xticks([])
    plt.yticks([])
    # plt.hold was removed from matplotlib; repeated plot calls draw on the same axes
    plt.plot(X[y == 1, 0], X[y == 1, 1], 'bo')
    plt.plot(X[y == -1, 0], X[y == -1, 1], 'ro')
    
    box_xy = np.append(xgrid.reshape(xgrid.size, 1), ygrid.reshape(ygrid.size, 1), 1)
    if mysvc is not None:
        scores = mysvc.decision_function(box_xy)
    else:
        print('You must have a valid SVC object.')
        return None
    
    CS=plt.contourf(xgrid, ygrid, scores.reshape(xgrid.shape), alpha=0.5, cmap='jet_r')
    plt.contour(xgrid, ygrid, scores.reshape(xgrid.shape), levels=[0], colors='k', linestyles='solid', linewidths=1.5)
    plt.contour(xgrid, ygrid, scores.reshape(xgrid.shape), levels=[-1,1], colors='k', linestyles='dashed', linewidths=1)
    plt.plot(mysvc.support_vectors_[:,0], mysvc.support_vectors_[:,1], 'ko', markerfacecolor='none', markersize=10)
    CB = plt.colorbar(CS)
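A usage sketch with scikit-learn on toy data; note the plotting branches above expect labels of exactly +1 and -1:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2) + 2, rng.randn(20, 2) - 2])
y = np.hstack([np.ones(20), -np.ones(20)])
clf = SVC(kernel='linear').fit(X, y)
plot_svc(X, y, clf)
plt.show()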
Example #4
def plot_rfs(size, C, Rx, Ry, color='b'):
    radius = np.sqrt(size[...]/np.pi)
    a, w = 0, C.shape[0]
    plt.scatter(Rx, Ry, s=15, color='w', edgecolor='k')
    plt.scatter(C[a:w, 1], C[a:w, 0], s=radius*500, alpha=0.4, color=color)
    plt.xticks([])
    plt.yticks([])
Example #5
def plot_confusion_matrix(cm, title='', cmap=plt.cm.Blues):
    # displays the per-class (driving, idling, walking) counts as a heat map
    print(type(cm))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title("Confusion Matrix")
    plt.colorbar()
    tick_marks = [0,1,2]
    target_name = ["driving","idling","walking"]


    plt.xticks(tick_marks,target_name,rotation=45)

    plt.yticks(tick_marks,target_name,rotation=45)
    print(len(cm[0]))

    for i in range(0,3):
        for j in range(0,3):
            plt.text(j,i,str(cm[i,j]))  # x=j, y=i so each count lands on its imshow cell
    plt.tight_layout()
    plt.ylabel("Actual Value")
    plt.xlabel("Predicted Outcome")
Example #6
def show_filters(weights,nweights,d1, d2, nrows, ncols, scale):
    """
    Plots the rows of NumPy 2D array ``weights`` as ``d1`` by ``d2`` images.

    The images are laid out in a ``nrows`` by ``ncols`` grid.

    Option ``scale`` sets the maximum absolute value of elements in ``weights``
    that will be plotted (larger values will be clamped to ``scale``, with the
    right sign).
    """
    perm = range(nweights)
    #random.shuffle(perm)
    image = -scale*numpy.ones((nrows*(d1+1)-1,ncols*(d2+1)-1),dtype=float)
    for i in range(nrows):
        for j in range(ncols):
            image[(i*d1+i):((i+1)*d1+i),(j*d2+j):((j+1)*d2+j)] = -1*weights[perm[i*ncols + j]].reshape(d1,d2)

    for i in range(nrows*(d1+1)-1):
        for j in range(ncols*(d2+1)-1):
            a = image[i,j]
            if a > scale:
                image[i,j] = scale
            if a < -scale:
                image[i,j] = -scale

    bordered_image = scale * numpy.ones((nrows*(d1+1)+1,ncols*(d2+1)+1),dtype=float)

    bordered_image[1:nrows*(d1+1),1:ncols*(d2+1)] = image

    imshow(bordered_image,cmap = cm.Greys,interpolation='nearest')
    xticks([])
    yticks([])
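A usage sketch, assuming the defining module pulls imshow, cm, xticks, and yticks from matplotlib.pylab as the bare calls above imply:

import numpy
from matplotlib.pylab import imshow, cm, xticks, yticks, show

weights = numpy.random.randn(12, 64)  # 12 random filters, one flattened 8x8 image per row
show_filters(weights, nweights=12, d1=8, d2=8, nrows=3, ncols=4, scale=1.0)
show()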
Example #7
File: utils.py Project: gmum/mlls2015
def plot_grid_experiment_results(grid_results, params, metrics):
    global plt
    params = sorted(params)
    grid_params = grid_results.grid_params
    plt.figure(figsize=(8, 6))
    for metric in metrics:
        grid_params_shape = [len(grid_params[k]) for k in sorted(grid_params.keys())]
        params_max_out = [(1 if k in params else 0) for k in sorted(grid_params.keys())]
        results = np.array([e.results.get(metric, 0) for e in grid_results.experiments])
        results = results.reshape(*grid_params_shape)
        for axis, included_in_params in enumerate(params_max_out):
            if not included_in_params:
                results = np.apply_along_axis(np.max, axis, results)

        print(results)
        params_shape = [len(grid_params[k]) for k in sorted(params)]
        results = results.reshape(*params_shape)

        if len(results.shape) == 1:
            results = results.reshape(-1,1)
        import matplotlib.pylab as plt

        #f.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
        plt.imshow(results, interpolation='nearest', cmap=plt.cm.hot)
        plt.title(str(grid_results.name) + " " + metric)

        if len(params) == 2:
            plt.xticks(np.arange(len(grid_params[params[1]])), grid_params[params[1]], rotation=45)
        plt.yticks(np.arange(len(grid_params[params[0]])), grid_params[params[0]])
        plt.colorbar()
        plt.show()
Example #8
    def plot_histogram_with_capacity(self, capacity, main=""):
        """Plot histogram of choices and capacities. The number of alternatives is determined
        from the second dimension of probabilities.
        """
        from matplotlib.pylab import arange, bar, xticks, yticks, title, text, axis, figure, subplot  # arange added; it is used below but was never imported

        probabilities = self.get_probabilities()
        if probabilities.ndim < 2:
            raise ValueError("probabilities must have at least 2 dimensions.")  # StandardError no longer exists in Python 3
        alts = self.probabilities.shape[1]
        width_par = (1 / alts + 1) / 2.0
        choice_counts = self.get_choice_histogram(0, alts)
        sum_probs = self.get_probabilities_sum()

        subplot(212)
        bar(arange(alts), choice_counts, width=width_par)
        bar(arange(alts) + width_par, capacity, width=width_par, color="r")
        xticks(arange(alts))
        title(main)
        Axis = axis()
        text(
            alts + 0.5,
            -0.1,
            "\nchoices histogram (blue),\ncapacities (red)",
            horizontalalignment="right",
            verticalalignment="top",
        )
Example #9
def show_binary_images(samples, nsamples, d1, d2, nrows, ncols):
    """
    Plots samples in a NumPy 2D array ``samples`` as ``d1`` by ``d2`` images.
    (one sample per row of ``samples``).

    The samples are assumed to be images with binary pixels. The
    images are laid out in a ``nrows`` by ``ncols`` grid.
    """
    perm = range(nsamples)
    #random.shuffle(perm)
    if samples.shape[0] < nrows*ncols:
        samples_padded = numpy.zeros((nrows*ncols,samples.shape[1]))
        samples_padded[:samples.shape[0],:] = samples
        samples = samples_padded

    image = 0.5*numpy.ones((nrows*(d1+1)-1,ncols*(d2+1)-1),dtype=float)
    for i in range(nrows):
        for j in range(ncols):
            image[(i*d1+i):((i+1)*d1+i),(j*d2+j):((j+1)*d2+j)] = (1-samples[perm[i*ncols + j]].reshape(d1,d2))

    bordered_image = 0.5 * numpy.ones((nrows*(d1+1)+1,ncols*(d2+1)+1),dtype=float)

    bordered_image[1:nrows*(d1+1),1:ncols*(d2+1)] = image

    imshow(bordered_image,cmap = cm.Greys,interpolation='nearest')
    xticks([])
    yticks([])
Example #10
def plot_runtime_results(results):
    plt.rcParams["figure.figsize"] = 7,7
    plt.rcParams["font.size"] = 22
    matplotlib.rc("xtick", labelsize=24)
    matplotlib.rc("ytick", labelsize=24)

    params = {"text.fontsize" : 32,
              "font.size" : 32,
              "legend.fontsize" : 30,
              "axes.labelsize" : 32,
              "text.usetex" : False
              }
    plt.rcParams.update(params)
    
    #plt.semilogx(results[:,0], results[:,3], 'r-x', lw=3)
    #plt.semilogx(results[:,0], results[:,1], 'g-D', lw=3)
    #plt.semilogx(results[:,0], results[:,2], 'b-s', lw=3)

    plt.plot(results[:,0], results[:,3], 'r-x', lw=3, ms=10)
    plt.plot(results[:,0], results[:,1], 'g-D', lw=3, ms=10)
    plt.plot(results[:,0], results[:,2], 'b-s', lw=3, ms=10)

    plt.legend(["Chain", "Tree", "FFT Tree"], loc="upper left")
    plt.xticks([1e5, 2e5, 3e5])
    plt.yticks([0, 60, 120, 180])

    plt.xlabel("Problem Size")
    plt.ylabel("Runtime (sec)")
    return results
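A usage sketch with fabricated numbers, just to show the expected 4-column layout of results (problem size plus three runtime series):

import numpy as np
import matplotlib
import matplotlib.pyplot as plt

sizes = np.array([1e5, 2e5, 3e5])
results = np.column_stack([sizes, sizes / 2e3, sizes / 3e3, sizes / 6e3])
plot_runtime_results(results)
plt.show()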
Example #11
    def plot_histogram(self, main="", numrows=1, numcols=1, fignum=1):
        """Plot a histogram of choices and probability sums. Expects probabilities as (at least) a 2D array.
        """
        from matplotlib.pylab import arange, bar, xticks, yticks, title, text, axis, figure, subplot  # arange added; it is used below but was never imported

        probabilities = self.get_probabilities()
        if probabilities.ndim < 2:
            raise ValueError("probabilities must have at least 2 dimensions.")  # StandardError no longer exists in Python 3
        alts = probabilities.shape[1]
        width_par = (1 / alts + 1) / 2.0
        choice_counts = self.get_choice_histogram(0, alts)
        sum_probs = self.get_probabilities_sum()

        subplot(numrows, numcols, fignum)
        bar(arange(alts), choice_counts, width=width_par)
        bar(arange(alts) + width_par, sum_probs, width=width_par, color="g")
        xticks(arange(alts))
        title(main)
        Axis = axis()
        text(
            alts + 0.5,
            -0.1,
            "\nchoices histogram (blue),\nprobabilities sum (green)",
            horizontalalignment="right",
            verticalalignment="top",
        )
Example #12
def plotRocCurves(file_legend):
	pylab.clf()
	pylab.figure(1)
	pylab.xlabel('1 - Specificity', fontsize=12)
	pylab.ylabel('Sensitivity', fontsize=12)
	pylab.title("Need for Referral")
	pylab.grid(True, which='both')
	pylab.xticks([i/10.0 for i in range(1,11)])
	pylab.yticks([i/10.0 for i in range(0,11)])
	pylab.tick_params(axis="both", labelsize=15)

	for file, legend in file_legend:
		points = open(file,"rb").readlines()
		x = [float(p.split()[0]) for p in points]
		y = [float(p.split()[1]) for p in points]
		dev = [float(p.split()[2]) for p in points]
		x = [0.0] + x
		y = [0.0] + y
		dev = [0.0] + dev
	
		auc = np.trapz(y, x) * 100
		aucDev = np.trapz(dev, x) * 100

		pylab.grid()
		pylab.errorbar(x, y, yerr = dev, fmt='-')
		pylab.plot(x, y, '-', linewidth = 1.5, label = legend + u" (AUC = {0:0.1f}% \xb1 {1:0.1f}%)".format(auc,aucDev))

	pylab.legend(loc = 4, borderaxespad=0.4, prop={'size':12})
	pylab.savefig("referral/referral-curves.pdf", format='pdf')
Example #13
    def algorithm_confidence_interval_figure(
        self, trace_file, algorithm, data, x_aspect, y_aspect, x_title, y_title, title
    ):

        reversed_data = transposed(data)

        fig, ax = plt.subplots()
        ax.set_xlabel(x_title, fontsize=18)
        ax.set_ylabel(y_title, fontsize=18)
        ax.set_title(title + " (" + self.legend(algorithm) + ")")
        x = self.vms_scenarios
        ax.xaxis.set_ticks(x)
        pylab.xticks(x, self.vms_ticks(x), rotation="vertical", verticalalignment="top")

        ax = fig.gca()

        scenarios_series = []
        m_series = []
        upper_ci_series = []
        lower_ci_series = []
        vms_series = []

        plt.grid(True)
        for scenario in reversed_data:
            x_serie = []
            y_serie = []
            x = int(scenario[0][x_aspect])

            for repetition in scenario:
                y = float(repetition[y_aspect])
                scatter(x, y, s=1, color="k")
                # ax.plot(x, y, color='red', ls='-', marker='.')#, label=self.legend(data_ref[0]['strategy']))
                y_serie += [y]
                x_serie += [x]

            m, ci = mean_confidence_interval(y_serie)
            # scenarios_series += [scenario['#VM']]
            m_series += [m]
            upper_ci_series += [m + ci]
            lower_ci_series += [m - ci]
            vms_series += [x_serie[0]]
            # ax.plot(x_serie[0], m, color='red', ls='-', marker='.', label=self.legend(algorithm))

            do_error_bar(x, m, ci, 1, 4)
            # print(x_serie)
            # print(y_serie)
            # print(m)
            # print(ci)

        print(vms_series)
        print(m_series)
        ax.plot(vms_series, m_series, color="blue", ls="-", marker=".", label=self.legend(algorithm))
        ax.plot(vms_series, upper_ci_series, color="red", ls="-.", marker=".", label=self.legend(algorithm))
        ax.plot(vms_series, lower_ci_series, color="green", ls="-.", marker=".", label=self.legend(algorithm))
        #        ax.plot(x2, y2b, color='blue', ls='-', marker='o', label=self.legend(data1[0]['strategy']))

        # plt.show()
        plt.savefig(self.result_dir + "/figure-" + trace_file + "-" + title + "-" + algorithm + ".png")
        #        plt.savefig('test.png')
        plt.close()
Example #14
def plotRocCurves(lesion, lesion_en):
	file_legend = []
	for techniqueMid in techniquesMid:
		for techniqueLow in techniquesLow:
			file_legend.append((directory + techniqueLow + "/" + techniqueMid + "/operating-points-" + lesion + "-scale.dat", "Low-level: " + techniqueLow + ". Mid-level: " + techniqueMid + "."))
			
			pylab.clf()
			pylab.figure(1)
			pylab.xlabel('1 - Specificity', fontsize=12)
			pylab.ylabel('Sensitivity', fontsize=12)
			pylab.title(lesion_en)
			pylab.grid(True, which='both')
			pylab.xticks([i/10.0 for i in range(1,11)])
			pylab.yticks([i/10.0 for i in range(0,11)])
			#pylab.tick_params(axis="both", labelsize=15)
			
			for file, legend in file_legend:
				points = open(file,"rb").readlines()
				x = [float(p.split()[0]) for p in points]
				y = [float(p.split()[1]) for p in points]
				x.append(0.0)
				y.append(0.0)
				
				auc = numpy.trapz(y, x) * -100

				pylab.grid()
				pylab.plot(x, y, '-', linewidth = 1.5, label = legend + u" (AUC = {0:0.1f}%)".format(auc))

	pylab.legend(loc = 4, borderaxespad=0.4, prop={'size':12})
	pylab.savefig(directory + "plots/" + lesion + ".pdf", format='pdf')
Example #15
def plot(frame,dirname,clim=None,axis_limits=None):
    if not os.path.exists('./figures'):
        os.makedirs('./figures')
        
    try:
        sol=Solution(frame,file_format='petsc',read_aux=False,path='./saved_data/'+dirname+'/_p/',file_prefix='claw_p')
    except IOError:
        print('Data file not found; please unzip the files in saved_data/.')
        return
    x=sol.state.grid.x.centers; y=sol.state.grid.y.centers
    mx=len(x); my=len(y)
    
    mp=sol.state.num_eqn    
    yy,xx = np.meshgrid(y,x)

    p=sol.state.q[0,:,:]
    if clim is not None:
        pl.pcolormesh(xx,yy,p,cmap=cm.RdBu_r)
    else:
        pl.pcolormesh(xx,yy,p,cmap=cm.Reds)
    pl.title("t= "+str(sol.state.t),fontsize=20)
    pl.xticks(size=20); pl.yticks(size=20)
    cb = pl.colorbar();

    if clim is not None:
        pl.clim(clim[0],clim[1]);
    imaxes = pl.gca(); pl.sca(cb.ax)
    pl.yticks(fontsize=20); pl.sca(imaxes)
    pl.axis('equal')
    if axis_limits is None:
        pl.axis([np.min(x),np.max(x),np.min(y),np.max(y)])
    else:
        pl.axis([axis_limits[0],axis_limits[1],axis_limits[2],axis_limits[3]])
    pl.savefig('./figures/'+dirname+'.png')
    pl.close()
Example #16
	def STAplot(self, option = 0):
		try:
			self.Files.OpenDatabase(self.NAME + '.h5')
			STA_TIME = self.Files.QueryDatabase('STA_Analysis', 'STA_TIME')[0]
			STA_Current = self.Files.QueryDatabase('STA_Analysis', 'STAstim')
			INTSTEP = self.Files.QueryDatabase('DataProcessing', 'INTSTEP')[0][0]
		except:
			print('Sorry no data found')
		
		X = np.arange(-STA_TIME / INTSTEP, STA_TIME / INTSTEP, dtype=float) * INTSTEP
		
		if option == 1:
			fig = plt.figure()
			ax = fig.add_subplot(111)
			ax.plot(X[0:int(STA_TIME/INTSTEP)],STA_Current[0:int(STA_TIME/INTSTEP)],
						linewidth=3, color='k')
			ax.plot(np.arange(-190,-170),np.ones(20)*0.35, linewidth=5,color='k')
			ax.plot(np.ones(200)*-170,np.arange(0.35,0.549,0.001),linewidth=5,color='k')
			ax.plot(np.arange(-200,0),np.zeros(200), 'k--', linewidth=2)
			plt.axis('off')
			plt.show()
			
		
		if option == 0:
			fig = plt.figure(figsize=(12,8))
			ax = fig.add_subplot(111)
			ax.plot(X[0:int(STA_TIME / INTSTEP) + 50], STA_Current[0:int(STA_TIME / INTSTEP) + 50],
						linewidth=3, color='k')
			plt.xticks(fontsize = 20)
			plt.yticks(fontsize = 20)
			plt.ylabel('current(pA)', fontsize = 20)
			plt.legend(('data',), loc='upper right')  # trailing comma so the label is not split into characters
			plt.show()
Example #17
File: model.py Project: PabloHN/htmd
    def eqDistribution(self, plot=True):
        """ Obtain and plot the equilibrium probabilities of each macrostate

        Parameters
        ----------
        plot : bool, optional, default=True
            Disable plotting of the probabilities by setting it to False

        Returns
        -------
        eq : ndarray
            An array of equilibrium probabilities of the macrostates

        Examples
        --------
        >>> model = Model(data)
        >>> model.markovModel(100, 5)
        >>> model.eqDistribution()
        """
        self._integrityCheck(postmsm=True)
        macroeq = np.ones(self.macronum) * -1
        for i in range(self.macronum):
            macroeq[i] = np.sum(self.msm.stationary_distribution[self.macro_ofmicro == i])

        if plot:
            from matplotlib import pylab as plt
            plt.ion()
            plt.figure()
            plt.bar(range(self.macronum), macroeq)
            plt.ylabel('Equilibrium probability')
            plt.xlabel('Macrostates')
            plt.xticks(np.arange(0.4, self.macronum+0.4, 1), range(self.macronum))
            plt.show()
        return macroeq
Example #18
def plot_fullstack( binning = np.linspace(0,10,1), myquery='', plotvar = default_plot_variable, \
                    scalefactor = 1., user_ylim = None):

    fig = plt.figure(figsize=(10,6))
    plt.grid(True)
    lasthist = 0
    myhistos = gen_histos(binning=binning,myquery=myquery,plotvar=plotvar,scalefactor=scalefactor)
    for key, (hist, bins) in myhistos.items():

      plt.bar(bins[:-1],hist,
              width=bins[1]-bins[0],
              color=colors[key],
              bottom = lasthist,
              edgecolor = 'k',
              label='%s: %d Events'%(labels[key],sum(hist)))
      lasthist += hist
     

    plt.title('CCSingleE Stacked Backgrounds',fontsize=25)
    plt.ylabel('Events',fontsize=20)
    if plotvar == '_e_nuReco' or plotvar == '_e_nuReco_better':
        xstring = 'Reconstructed Neutrino Energy [GeV]' 
    elif plotvar == '_e_CCQE':
        xstring = 'CCQE Energy [GeV]'
    else:
        xstring = plotvar
    plt.xlabel(xstring,fontsize=20)
    plt.legend()
    plt.xticks(list(plt.xticks()[0]) + [binning[0]])
    plt.xlim([binning[0],binning[-1]])
Example #19
	def PSTH(self):
	
			
		TimeRes = np.array([0.1,0.25,0.5,1,2.5,5.0,10.0,25.0,50.0,100.0])

		Projection_PSTH = np.zeros((2,len(TimeRes)))
		for i in range(0,len(TimeRes)):
			Data_Hist,STA_Hist,Model_Hist,B = Hist(TimeRes[i])
			data = Data_Hist/np.linalg.norm(Data_Hist)
			sta = STA_Hist/np.linalg.norm(STA_Hist)
			model = Model_Hist/np.linalg.norm(Model_Hist)
			Projection_PSTH[0,i] = np.dot(data,sta)
			Projection_PSTH[1,i] = np.dot(data,model)
			
		import matplotlib.font_manager as fm
		
		plt.figure()
		plt.semilogx(TimeRes,Projection_PSTH[0,:],'gray',TimeRes,Projection_PSTH[1,:],'k',
			     linewidth=3, marker='o', markersize = 12)
		plt.xlabel('Time Resolution, ms',fontsize=25)
		plt.xticks(fontsize=25)
		#plt.axis["right"].set_visible(False)
		plt.ylabel('Projection onto PSTH',fontsize=25)
		plt.yticks(fontsize=25)
		prop = fm.FontProperties(size=20)
		plt.legend(('1D model','2D model'),loc='upper left',prop=prop)
		plt.tight_layout()
		plt.show()
Example #20
def plot_p(frame):
    sol=Solution(frame,file_format='petsc',read_aux=False,path='./_output/_p/',file_prefix='claw_p')
    x=sol.state.grid.x.centers; y=sol.state.grid.y.centers
    mx=len(x); my=len(y)
    
    mp=sol.state.num_eqn    
    yy,xx = np.meshgrid(y,x)

    p=sol.state.q[0,:,:]
    fig = pl.figure(figsize=(8, 3.5))
    #pl.title("t= "+str(sol.state.t),fontsize=20)
    pl.xticks(size=20); pl.yticks(size=20)
    pl.xlabel('x',fontsize=20); pl.ylabel('y',fontsize=20)
    #pl.pcolormesh(xx,yy,p_subxy,cmap=cm.OrRd)
    pl.pcolormesh(xx,yy,p,cmap='RdBu_r')
    pl.autoscale(tight=True)
    cb = pl.colorbar(ticks=[0.5,1,1.5,2]);
    
    #pl.clim(ticks=[0.5,1,1.5,2])
    imaxes = pl.gca(); pl.sca(cb.ax)
    pl.yticks(fontsize=20); pl.sca(imaxes)
    #pl.xticks(fontsize=20); pl.axes(imaxes)
    #pl.axis('equal')
    pl.axis('tight')
    fig.tight_layout()
    pl.savefig('./_plots_to_paper/sound-speed_FV_t'+str(frame)+'_pcolor.png')
    pl.close()
Example #21
def plot_p_leading_order(frame):
    mat = scipy.io.loadmat('sound-speed_2D-wave.mat')
    T=5; nt=int(T/0.5)  # integer index for the time axis
    pp=mat['U'][nt,:,:]
    xx=mat['xx']
    yy=mat['yy']

    fig=pl.figure(figsize=(8, 3.5))
    #pl.title("t= "+str(sol.state.t),fontsize=20)
    pl.xticks(size=20); pl.yticks(size=20)
    pl.xlabel('x',fontsize=20); pl.ylabel('y',fontsize=20)
    #pl.pcolormesh(xx,yy,p_subxy,cmap=cm.OrRd)
    pl.pcolormesh(xx,yy,pp,cmap='RdBu_r')
    pl.autoscale(tight=True)
    cb = pl.colorbar(ticks=[0.5,1,1.5,2]);
    
    #pl.clim(ticks=[0.5,1,1.5,2])
    imaxes = pl.gca(); pl.sca(cb.ax)
    pl.yticks(fontsize=20); pl.sca(imaxes)
    #pl.xticks(fontsize=20); pl.axes(imaxes)
    #pl.axis('equal')
    pl.axis('tight')
    fig.tight_layout()
    pl.savefig('./_plots_to_paper/sound-speed_LO_t'+str(frame)+'_pcolor.png')
    pl.close()
Example #22
def plot_tuning_curves(direction_rates, title):
    """
    This function takes the x-values and the y-values  in units of spikes/s 
    (found in the two columns of direction_rates) and plots a histogram and 
    polar representation of the tuning curve. It adds the given title.
    """
    x = direction_rates[:,0]
    y = direction_rates[:,1]
    plt.figure()
    plt.subplot(2,2,1)
    plt.bar(x,y,width=45,align='center')
    plt.xlim(-22.5,337.5)
    plt.xticks(x)
    plt.xlabel('Direction of Motion (degrees)')
    plt.ylabel('Firing Rate (spikes/s)')
    plt.title(title)

    plt.subplot(2,2,2,polar=True)
    r = np.append(y,y[0])
    theta = np.deg2rad(np.append(x, x[0]))
    plt.polar(theta,r,label='Firing Rate (spikes/s)')
    plt.legend(loc=8)
    plt.title(title)
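A usage sketch with a synthetic tuning curve (assumes np and plt are imported as in the excerpt):

import numpy as np
import matplotlib.pyplot as plt

directions = np.arange(0, 360, 45)
rates = np.array([5., 12., 28., 34., 25., 14., 6., 4.])
plot_tuning_curves(np.column_stack((directions, rates)), 'Synthetic tuning curve')
plt.show()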
Example #23
def test_probabilities(exp, n=1000):
    d = {}
    for i in range(n):
        foo = rp.parsex(exp)
        # foo = len(foo.replace(' ',''))
        if foo in d:
            d[foo] += 1
        else:
            d[foo] = 1
    # lists = sorted(d.items())
    # x, y = zip(*lists)  # unpack a list of pairs into two tuples
    # print x
    # print y
    # for a, b in zip(x, y):
    #     plt.text(a,b, str("%s\n%s" % (a, b)))
    plt.xlabel("String length")
    plt.ylabel("Occurence")
    plt.title("Union: %s P=0.3 N=1000" % exp)
    # plt.plot(x, y)

    # For bar chart (use on Union)
    # See for labeling: https://stackoverflow.com/a/30229062
    l = sorted(d.items())
    x, y = zip(*l)
    plt.bar(range(len(y)), y, align="center")
    plt.xticks(range(len(x)), x)

    plt.show()
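A hedged call, assuming rp.parsex draws one random string from the given expression, as the counting loop above implies:

test_probabilities("a|b", n=1000)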
Example #24
    def plot_cost(self):
        if self.show_cost not in self.train_outputs[0][0]:
            raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
#        print self.test_outputs
        train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
        test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
        if self.smooth_test_errors:
            test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in range(1,len(test_errors)+1)]
        numbatches = len(self.train_batch_range)
        test_errors = n.row_stack(test_errors)
        test_errors = n.tile(test_errors, (1, self.testing_freq))
        test_errors = list(test_errors.flatten())
        test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
        test_errors = test_errors[:len(train_errors)]

        numepochs = len(train_errors) / float(numbatches)
        pl.figure(1)
        x = range(0, len(train_errors))
        pl.plot(x, train_errors, 'k-', label='Training set')
        pl.plot(x, test_errors, 'r-', label='Test set')
        pl.legend()
        ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
        epoch_label_gran = int(ceil(numepochs / 20.)) 
        epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran 
        ticklabels = [str(x[1] // numbatches) if x[0] % epoch_label_gran == epoch_label_gran-1 else '' for x in enumerate(ticklocs)]  # list, not map; // keeps epoch labels integral

        pl.xticks(ticklocs, ticklabels)
        pl.xlabel('Epoch')
        pl.ylabel(self.show_cost)
        pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
        print "plotted cost"
Example #25
    def plot(self):
        self._logger.debug('plotting')
        colors = self._colors[:(len(self._categoryData))]
        ind = pylab.arange(len(self._xData))
        bar_width = 1.0 / (len(self._categoryData) + 1)
        bar_groups = []

        for c in range(len(self._categoryData)):
            bars = pylab.bar(ind+c*bar_width, self._yData[c], bar_width, color=colors[c % len(colors)])
            bar_groups.append(bars)

        pylab.xticks(ind+bar_width, self._xData)
        if (self._usingLegend):
            pylab.legend((b[0] for b in bar_groups), self._categoryData,
                         title = self._legendTitle, loc = self._legendLocation,
                         labelspacing = self._legendLabelSpacing, 
                         prop = self._legendFontProps, bbox_to_anchor = self._legendBboxToAnchor)

        pylab.xlabel(self._xLabel, fontdict=self._font)
        pylab.ylabel(self._yLabel, fontdict=self._font)
        pylab.title(self._title, fontdict=self._font)
        if(self._saveFig):
            self._logger.debug('Saving plot as {}'.format(self._saveName))
            pylab.savefig(self._saveName)

        pylab.show()
Example #26
    def plot_cost(self):
        if self.show_cost not in self.train_outputs[0][0]:
            raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
        train_errors = [o[0][self.show_cost][self.cost_idx] for o in self.train_outputs]
        test_errors = [o[0][self.show_cost][self.cost_idx] for o in self.test_outputs]

        numbatches = len(self.train_batch_range)
        test_errors = numpy.row_stack(test_errors)
        test_errors = numpy.tile(test_errors, (1, self.testing_freq))
        test_errors = list(test_errors.flatten())
        test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
        test_errors = test_errors[:len(train_errors)]

        numepochs = len(train_errors) / float(numbatches)
        pl.figure(1)
        x = range(0, len(train_errors))
        pl.plot(x, train_errors, 'k-', label='Training set')
        pl.plot(x, test_errors, 'r-', label='Test set')
        pl.legend()
        ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
        epoch_label_gran = int(ceil(numepochs / 20.)) # aim for about 20 labels
        epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) # but round to nearest 10
        ticklabels = [str(x[1] // numbatches) if x[0] % epoch_label_gran == epoch_label_gran-1 else '' for x in enumerate(ticklocs)]  # list, not map; // keeps epoch labels integral

        pl.xticks(ticklocs, ticklabels)
        pl.xlabel('Epoch')
#        pl.ylabel(self.show_cost)
        pl.title(self.show_cost)
        pl.savefig('cost.png')
Example #27
def Iris_network(ant, data):
    #G = nx.watts_strogatz_graph(100,3,0.6)
    #G = nx.cubical_graph()
    G = nx.Graph()  # undirected graph

    tmp1 = []
    tmp2 = []
    tmp3 = []
    for i in range(len(data)):
        if data[i][4] == 'setosa':
            tmp1.append(str(i))
        elif data[i][4] == 'versicolor':
            tmp2.append(str(i))
        elif data[i][4] == 'virginica':
            tmp3.append(str(i))

    for i in range(len(data)):
        if len(ant[i].parent) == 0 : pass
        else:
            dest = ant[i].parent[0]
            #G.add_edge(str(ant[i].data), str(ant[dest].data))
            G.add_edge(str(ant[i].Id), str(ant[dest].Id))

    pos = nx.spring_layout(G)

    nx.draw_networkx_nodes(G, pos, nodelist=tmp1, node_size=30, node_color="r")
    nx.draw_networkx_nodes(G, pos, nodelist=tmp2, node_size=30, node_color="w")
    nx.draw_networkx_nodes(G, pos, nodelist=tmp3, node_size=30, node_color="w")
    nx.draw_networkx_edges(G, pos, width=1)
    #nx.draw_networkx_labels(G, pos, font_size=10, font_color="b")
    plt.xticks([])
    plt.yticks([])
    plt.show()
Example #28
def plotDist(subplot, X, Y, label):
    pylab.grid()
    pylab.subplot(subplot)
    pylab.bar(X, Y, 0.05)
    pylab.ylabel(label)
    pylab.xticks(pylab.arange(len(X)), X)
    pylab.yticks(pylab.arange(0,1,0.1))
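A usage sketch (assumes import pylab at module level, as the function body implies):

import pylab

plotDist(111, [0, 1, 2, 3], [0.1, 0.4, 0.3, 0.2], 'probability')
pylab.show()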
Example #29
def bar_chart(categories, xdata, ydata,
              title, xlabel, ylabel,
              font={'family':'serif','color':'black','weight':'normal','size':12,},
              plot=True, saveImage=False, imageName='fig.png'):

    colors = 'rgbcmyk'
    colors = colors[:(len(categories))]

    ind  = pylab.arange(len(xdata))
    bar_width = 1.0 / (len(categories) + 1)
    bar_groups = []

    # loop through categories and plot one bar in each category every loop (ie., one color at a time.)
    fig = pylab.figure()
    for c in range(len(categories)):
        bars = pylab.bar(ind+c*bar_width, ydata[c], bar_width, color=colors[c % len(colors)])
        bar_groups.append(bars)

    fontP = FontProperties()
    fontP.set_size('small')
    pylab.xticks(ind+bar_width, xdata)
    pylab.legend([b[0] for b in bar_groups], categories, 
                 loc='center right', title='Flow #', labelspacing=0,
                 prop=fontP, bbox_to_anchor=(1.125, .7))
    pylab.xlabel(xlabel, fontdict=font)
    pylab.ylabel(ylabel, fontdict=font)
    pylab.title(title, fontdict=font)

    # save the figure
    if saveImage:
        pylab.savefig(imageName)

    # plot the figure
    if plot:
        pylab.show()
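A usage sketch showing the expected argument shapes (values are made up):

categories = ['flow 1', 'flow 2']   # one bar color per category
xdata = ['A', 'B', 'C']             # group labels along the x axis
ydata = [[3, 5, 2], [4, 1, 6]]      # bar heights, one list per category
bar_chart(categories, xdata, ydata, 'Throughput per host', 'host', 'Mbps')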
Example #30
def boxplot_poi(data, var_name):
    """
    Makes box plot with variable "var_name"
    split into
    :param data: data dict with enron data
    :param var_name: name of variable to plot
    :return: plot object
    """
    poi_v = []
    no_poi_v = []
    for p in data.values():
        value = p[var_name]
        if value == "NaN":
            value = 0
        if p["poi"] == 1:
            poi_v.append(value)
        else:
            no_poi_v.append(value)
    plt.xlabel("POI")
    plt.ylabel(var_name)
    plt.boxplot([poi_v, no_poi_v])
    plt.xticks([1, 2], ["POI", "Not a POI"])
    # http://stackoverflow.com/a/29780292/1952996
    for i, v in enumerate([poi_v, no_poi_v]):
        y = v
        x = np.random.normal(i+1, 0.04, size = len(y))
        plt.plot(x, y, "r.", alpha=0.2)
Example #31
def plot_profile(cfg, plot_params):
    """Plot profiles.

    From previously calculated data for Hovmoeller diagrams.

    Parameters
    ----------
    model_filenames: OrderedDict
        OrderedDict with model names as keys and input files as values.
    cmor_var: str
        name of the CMOR variable
    region: str
        name of the region predefined in `hofm_regions` function.
    diagworkdir: str
        path to work directory.
    diagplotdir: str
        path to plotting directory.
    cmap: matplotlib.cmap object
        color map
    dpi: int
        dpi for the output figure
    observations: str
        name of the dataset with observations

    Returns
    -------
    None
    """
    level_clim = Dataset(plot_params['model_filenames'][
        plot_params['observations']]).variables['lev'][:]
    plt.figure(figsize=(5, 6))
    axis = plt.subplot(111)

    color = iter(plot_params['cmap'](np.linspace(
        0, 1, len(plot_params['model_filenames']))))
    lev_limit_clim = level_clim[level_clim <= cfg['hofm_depth']].shape[0] + 1

    mean_profile = np.zeros((level_clim[:lev_limit_clim].shape[0],
                             len(plot_params['model_filenames']) - 1))
    mean_profile_counter = 0

    for mmodel in plot_params['model_filenames']:
        logger.info("Plot profile %s data for %s, region %s",
                    plot_params['variable'], mmodel, plot_params['region'])
        # construct input filenames
        ifilename = genfilename(cfg['work_dir'], plot_params['variable'],
                                mmodel, plot_params['region'], 'hofm', '.npy')
        ifilename_levels = genfilename(cfg['work_dir'],
                                       plot_params['variable'], mmodel,
                                       plot_params['region'], 'levels', '.npy')
        # load data
        hofdata = np.load(ifilename, allow_pickle=True)
        lev = np.load(ifilename_levels, allow_pickle=True)

        # convert data if needed and set labels
        cb_label, hofdata = label_and_conversion(plot_params['variable'],
                                                 hofdata)

        # set index for maximum level (max_level+1)
        lev_limit = lev[lev <= cfg['hofm_depth']].shape[0] + 1

        # calculate mean profile
        profile = hofdata.mean(axis=1)

        if mmodel != plot_params['observations']:
            next_color = next(color)
        else:
            next_color = 'k'

        plt.plot(profile, lev[0:lev_limit], label=mmodel, c=next_color)

        # interpolate to standard levels and add to mean profile
        profile_interpolated = np.interp(level_clim[:lev_limit_clim],
                                         lev[0:lev_limit], profile)
        if mmodel != plot_params['observations']:

            print('include {} in to the mean'.format(mmodel))
            mean_profile[:, mean_profile_counter] = profile_interpolated
            mean_profile_counter += 1

    # Here we are plotting the mean profile separately
    mean_profile_mean = np.nanmean(mean_profile, axis=1)

    plt.plot(mean_profile_mean,
             level_clim[:lev_limit_clim],
             label='MODEL-MEAN',
             linestyle='--',
             color='k',
             lw=3)

    plt.xticks(size=12)
    plt.yticks(size=12)

    plt.xlabel(cb_label, size=12, rotation='horizontal')
    plt.ylabel('m', size=12, rotation='horizontal')

    plt.ylim(0, cfg['hofm_depth'])

    # we shift the legend and plot it
    box = axis.get_position()
    axis.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    axis.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=10)

    plt.gca().invert_yaxis()

    plot_params['basedir'] = cfg['plot_dir']
    plot_params['ori_file'] = ifilename
    plot_params['areacello'] = None
    plot_params['mmodel'] = None

    pltoutname = genfilename(cfg['plot_dir'], plot_params['variable'],
                             'MULTIMODEL', plot_params['region'], 'profile')

    plt.savefig(pltoutname, dpi=plot_params['dpi'], bbox_inches='tight')
    provenance_record = get_provenance_record(plot_params, 'profile', 'png')
    with ProvenanceLogger(cfg) as provenance_logger:
        provenance_logger.log(pltoutname + '.png', provenance_record)
Example #32
w_in = w_0 * np.sqrt(1 + (rho_0z/z_R)**2)


plt.figure(13)
plt.plot( w_in/(np.sqrt(2) * rho), -grad_z0*10**8, lw=2, c="c", label="rho_0x = 0")#

#plt.plot( w * 10 ** 6, grad_z1*10**8, lw=2, c="r", label="rho_0x = -5um")

#plt.plot( w * 10 ** 6, grad_z2*10**8, lw=2, c="g", label="rho_0x = 5um")

#plt.plot( w * 10 ** 6, grad_z3*10**8, lw=2, c="y", label="rho_0x = 7.07um")


new_ticks1 = np.linspace(0, 3, 4) # plot axis
print(new_ticks1)
plt.xticks(new_ticks1,fontsize=20)
plt.yticks(np.linspace(5, -20, 6),fontsize=20)
plt.rc('xtick',labelsize=20)
plt.rc('ytick',labelsize=20)
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0))
ax.spines['bottom'].set_position(('data',0))

plt.legend(loc=1,fontsize=15)

plt.xlabel('w/(sqrt(2)*rho)',fontsize=20)
plt.ylabel('kz(10^(-8)N/m)',fontsize=20)
Example #33
bin_edges = [
    0, 30, 60, 90, 120, 180, 270, 360
]  # use small negative number to include 0 in the interpolation
modis_scd = np.nanmean(
    ann_scd_m[1:12], axis=0
) / 100.  # was in % #TODO fix up hack for erroneous data 2012 onwards
CS1 = plt.contourf(x_centres,
                   y_centres,
                   modis_scd,
                   levels=bin_edges,
                   cmap=plt.cm.magma_r,
                   extend='max')
# CS1.cmap.set_bad('grey')
CS1.cmap.set_over([0.47, 0.72, 0.77])
plt.gca().set_aspect('equal')
# plt.imshow(modis_scd, origin=0, interpolation='none', vmin=0, vmax=365, cmap='magma_r')
plt.xticks([])
plt.yticks([])
cbar = plt.colorbar()
cbar.set_label('Snow cover duration (days)', rotation=90)
plt.xticks(np.arange(12e5, 17e5, 2e5))
plt.yticks(np.arange(50e5, 55e5, 2e5))
plt.ticklabel_format(axis='both', style='sci', scilimits=(0, 0))
plt.ylabel('NZTM northing')
plt.xlabel('NZTM easting')
plt.title('Model mean SCD {} to {}'.format(years_to_take[0],
                                           years_to_take[-1]))
plt.tight_layout()
plt.savefig(plot_folder + '/SCD model {} to {} {}.png'.format(
    years_to_take[0], years_to_take[-1], run_id),
            dpi=600)
plt.clf()
Example #34
from scipy import stats
import numpy as np
import matplotlib.pylab as plt

# create some normal random noisy data
ser = 50*np.random.rand() * np.random.normal(10, 10, 100) + 20
print(ser)
# plot normed histogram
plt.hist(ser, density=True)

# find minimum and maximum of xticks, so we know
# where we should compute theoretical distribution
xt = plt.xticks()[0]
xmin, xmax = min(xt), max(xt)
lnspc = np.linspace(xmin, xmax, len(ser))

# let's try the normal distribution first
m, s = stats.norm.fit(ser) # get mean and standard deviation
pdf_g = stats.norm.pdf(lnspc, m, s) # now get theoretical values in our interval
plt.plot(lnspc, pdf_g, label="Norm") # plot it

# exactly same as above
ag,bg,cg = stats.gamma.fit(ser)
pdf_gamma = stats.gamma.pdf(lnspc, ag, bg,cg)
plt.plot(lnspc, pdf_gamma, label="Gamma")

# guess what :)
ab,bb,cb,db = stats.beta.fit(ser)
pdf_beta = stats.beta.pdf(lnspc, ab, bb,cb, db)
plt.plot(lnspc, pdf_beta, label="Beta")
Example #35
fig = plt.figure(figsize=(22, 12))
gs = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
gs.update(wspace=0.04)
ax1 = plt.subplot(gs[0])
plt.rcParams["figure.figsize"] = [16, 9]  #
plt.plot(df2.index.year,
         df2['sd'][:],
         label='ERA5-Land',
         linewidth=2,
         c='blue')

plt.legend(loc="upper left", markerscale=1., scatterpoints=1, fontsize=20)

#ax.set_xlim(result.index.year[0], result.index.year[-1])
plt.xticks(range(df2.index.year[0] - 1, df2.index.year[-1] + 1, 10),
           fontsize=14)
plt.yticks(fontsize=14)
# Don't allow the axis to be on top of your data
ax1.set_axisbelow(True)

ax1.grid(axis="x", linestyle="--", color='black', linewidth=0.25, alpha=0.5)
ax1.grid(axis="y", linestyle="--", color='black', linewidth=0.25, alpha=0.5)
# Show the minor grid lines with very faint and almost transparent grey lines
plt.minorticks_on()
plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.2)  # the b= keyword was removed in newer matplotlib

#xposition = [1970, 2000, 2010, 2040, 2070]
#for xc in xposition:
#    plt.axvline(x=xc, color='k', linestyle='--')
#for label in ax1.get_yticklabels():
#    label.set_fontsize(20)
Example #36
def run(shot,time_i,time_f,treename='EFITRT1'):
   import os, sys
   import subprocess                 # For issuing commands to the OS.
   import numpy as np
   from matplotlib import pylab
   #import matplotlib.tri as tri


     
   PARAMETERS=['\\bdry','\\gtime','\\lim','\\limitr','\\mh','\\mw','\\nbdry',
               '\\psirz','\\r','\\rgrid1',
               '\\rhovn','\\rmaxis','\\rzero','\\xdim','\\z',
               '\\zdim','\\zmaxis','\\zmid'];
   
   geqdsk=[];

   treename='EFITRT1'

   with MDS(server="172.17.100.200:8005") as mds:
      try:
          eq=mds.open(shot=shot, tree=treename);
      except: 
          print "Error #1"
      else:
          try:
              print(mds.alist)

              for signame in PARAMETERS:
                  print('reading ...', signame)
                  temp = eq.get(signame).data();
                  geqdsk.append(temp); 
                  #print geqdsk[PARAMETERS.index(signame)];
              
          except:
              print "Can not reading the signal\n Quit the program";          
              sys.exit(0);
          else:
              print "END of reading"
              #plt.show();


   mds.close();
   
   index_time = PARAMETERS.index('\\gtime');

   if( time_i < geqdsk[index_time][0] ):
       time_i = geqdsk[index_time][0];
       print "the initial time set to", time_i;
   
   if( time_f > geqdsk[index_time][len(geqdsk[index_time])-1] ):
       time_f =  geqdsk[index_time][len(geqdsk[index_time])-1]
       print "the final time set to", time_f;
       
   
       
   nlim =  geqdsk[PARAMETERS.index('\\limitr')][0]
            
   rlim = np.zeros( (nlim,) ) # "R of limiter points in meter"
   zlim = np.zeros( (nlim,) ) # "Z of limiter points in meter" 
            
   for w in range(nlim): 
       rlim[w] = geqdsk[PARAMETERS.index('\\lim')][w][0]
       zlim[w] = geqdsk[PARAMETERS.index('\\lim')][w][1]

   for i in range(0,len(geqdsk[index_time])):
       
       time = geqdsk[index_time][i];      
                   
       #if(( time >= time_i) and ( time <= time_f )):
       if True:
            time_fileout = time*1000;
            #print '%06d'%time_fileout,time
            filename='kstar_%s_%05d_%06d'%(treename,shot,time_fileout);
            print('writing..', filename)
                       
            
            nw=geqdsk[PARAMETERS.index('\\mw')][i];
            nh=geqdsk[PARAMETERS.index('\\mh')][i];

            rmin   = geqdsk[PARAMETERS.index('\\rgrid1')][i]; 
            rdim   = geqdsk[PARAMETERS.index('\\xdim')][i]; 
            rmax   = rdim+rmin;
            dr     = (rmax - rmin)/float(nw - 1) 
            
            zdim   = geqdsk[PARAMETERS.index('\\zdim')][i]; 
            zmid   = geqdsk[PARAMETERS.index('\\zmid')][i];
            
            zmin = zmid - zdim*0.5 
            zmax = zmid + zdim*0.5 
            dz = (zmax - zmin)/float(nh - 1) 

            rmaxis = geqdsk[PARAMETERS.index('\\rmaxis')][i];
            zmaxis = geqdsk[PARAMETERS.index('\\zmaxis')][i];
            
            rs = np.arange(rmin, rmin + (rmax-rmin)*(1.+1.e-10), dr) 
            zs = np.arange(zmin, zmin + (zmax-zmin)*(1.+1.e-10), dz) 
            
            nbbbs =  geqdsk[PARAMETERS.index('\\nbdry')][i]
       
            rbbbs = np.zeros( (nbbbs,) ) # "R of boundary points in meter"
            zbbbs = np.zeros( (nbbbs,) ) # "Z of boundary points in meter" 
                       
            for w in range(nbbbs): 
                 rbbbs[w] = geqdsk[PARAMETERS.index('\\bdry')][i][w][0]
                 zbbbs[w] = geqdsk[PARAMETERS.index('\\bdry')][i][w][1]
                 
            string_time = '#%06d t=%6.5f\n'%(shot,time)
            
            pylab.figure(1, figsize=(4,6))
            
            #pylab.pcolor(rs, zs, geqdsk[PARAMETERS.index('\\psirz')][i], shading='interp')

            #pylab.fill([1.0,2.0,2.0,1.0],[0.0,0.0,1.0,1.0], facecolor='b',alpha=1.0, edgecolor='black') 

            # Create the Triangulation; no triangles so Delaunay triangulation created.

            if i==0:
                rxx, zyy = np.meshgrid(rs,zs);
            

            pylab.contour(rxx,zyy,geqdsk[PARAMETERS.index('\\psirz')][i],20,linewidths=1)  # use the pylab module imported above; plt was never imported here

            pylab.plot(rbbbs,zbbbs, 'r') 
            pylab.plot(rlim, zlim, 'k',linewidth=3) 

            pylab.axis('scaled') 

            pylab.title(string_time) 

            pylab.xlabel('R'); pylab.xticks([1.2,1.6,2.0,2.4]);
            pylab.ylabel('Z')
                        
            pylab.savefig(filename+'.png', transparent=False)  # savefig takes format=, not fmt=; the format is inferred from the extension
            
            pylab.clf()
            
            
   # Merging to make .avi
   
   not_found_msg = """
       The mencoder command was not found;
       mencoder is used by this script to make an avi file from a set of pngs.
       It is typically not installed by default on linux distros because of
       legal restrictions, but it is widely available.
         """
   
   try:
    subprocess.check_call(['mencoder'])
   except subprocess.CalledProcessError:
    print "mencoder command was found"
    pass # mencoder is found, but returns non-zero exit as expected
    # This is a quick and dirty check; it leaves some spurious output
    # for the user to puzzle over.
   except OSError:
    print(not_found_msg)
    sys.exit("quitting\n")
    
   
   filelist='mf://kstar_%s_%05d_*'%(treename,shot);
   
   outfile_name='kstar_%s_%05d.avi'%(treename,shot);
   
   command = ('mencoder',
           'mf://*.png',
           '-mf',
           'type=png:w=400:h=600:fps=15',
           '-ovc',
           'lavc',
           '-lavcopts',
           'vcodec=mpeg4',
           '-oac',
           'copy',
           '-o',
           outfile_name)

#os.spawnvp(os.P_WAIT, 'mencoder', command)

   print "\n\nabout to execute:\n%s\n\n" % ' '.join(command)
   subprocess.check_call(command)

   print "\n\n The movie was written to %s"%(outfile_name);

   cmd_rm = "rm -f *.png"
   os.system(cmd_rm)

   cmd_rm = "scp " + outfile_name + " [email protected]:~/rtEFITMovie/rtEFIT.avi"
   os.system(cmd_rm)

   webfile_name='rt%06d.avi'%(shot);
   cmd_rm = "scp " + outfile_name + " [email protected]:/usr/local/tomcat5/webapps/kstarweb/intranet/efitmovie/movies/"+webfile_name
   os.system(cmd_rm)
Example #37
# coding: utf-8
from matplotlib import pylab as plt

plt.figure(figsize=(20, 8), dpi=200)
# x-axis positions of the data: an iterable (here a range yielding the 12 values [2, 4, ..., 24])
x = range(2, 26, 2)

# y-axis positions of the data: must be an iterable (a list of 12 values, one per x)
y = [15, 13, 16, 5, 17, 15, 17, 15, 16, 18, 80, 55]

# pass x and y to draw the line plot
plt.plot(x, y)

# set the tick marks for the x and y axes
# plt.xticks(range(1, 26))
plt.yticks(range(1, 101, 5))

# finer-grained ticks using a list comprehension
plt.xticks([i / 2 for i in range(1, 49)], rotation=45)

# show the figure
# plt.show()

# save the figure
plt.savefig('./matp.png')
Example #38
import numpy as np
import matplotlib.pyplot as plt

# minimal setup so this excerpt runs stand-alone; x, y, and ax are assumed by the snippet
fig, ax = plt.subplots()
x = np.linspace(0, 10, 200)
y = np.sin(x)
z = np.cos(x)
# normal line plot
plt.plot(x, y, color='black', linewidth=1.5, label='Sin')
# dashed line plot with circle markers
plt.plot(x,
         z,
         marker='o',
         color='blue',
         linestyle='dashed',
         linewidth=1.5,
         label='Cos')

# set labels, labels sizes, ticks, ticks sizes
plt.xlabel('Time [s]', fontsize=9)
plt.ylabel('Power [arb]', fontsize=9)
plt.xticks(fontsize=9)
plt.yticks(fontsize=9)

# display different scale for the same curve
ax2 = ax.twinx()  # make second y axis (while duplicating/twin x)
y2 = 20 * np.sin(x)
# don't actually display the plot, use linestyle='None'
plt.plot(x, y2, linestyle='None')
plt.ylabel('20*Power [arb]', fontsize=7)
plt.yticks(fontsize=7)

# saving the plot
# for the paper draft, it's best to use png. When we actually submit a paper
# we'll need to save the plot as a .eps file instead.
savefile = 'Figure1_1subplot.png'
plt.savefig(savefile, dpi=300, facecolor='w', edgecolor='k')
Example #39
ot_lpl1 = ot.da.SinkhornLpl1Transport(reg_e=1e-1, reg_cl=1e0)
ot_lpl1.fit(Xs=Xs, ys=ys, Xt=Xt)

# transport source samples onto target samples
transp_Xs_emd = ot_emd.transform(Xs=Xs)
transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=Xs)
transp_Xs_lpl1 = ot_lpl1.transform(Xs=Xs)

##############################################################################
# Fig 1 : plots source and target samples + matrix of pairwise distance
# ---------------------------------------------------------------------

pl.figure(1, figsize=(10, 10))
pl.subplot(2, 2, 1)
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Source samples')

pl.subplot(2, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Target samples')

pl.subplot(2, 2, 3)
pl.imshow(M, interpolation='nearest')
pl.xticks([])
pl.yticks([])
Example #40
def tsplot_plot(cfg, plot_params):
    """Plot a TS diagram.

    Parameters
    ----------
    model_filenames: OrderedDict
        OrderedDict with model names as keys and input files as values.
    max_level: float
        maximum depth level the TS plot should go to.
    region: str
        name of the region predefined in `hofm_regions` function.
    diagworkdir: str
        path to work directory.
    diagplotdir: str
        path to plotting directory.
    ncols: str
        number of columns in the resulting plot
        (rows will be calculated from the total number of plots)
    cmap: matplotlib.cmap object
        color map
    observations: str
        name of the dataset with observations

    Returns
    -------
    None
    """
    # Setup a figure
    nplots = len(plot_params['model_filenames'])
    ncols = float(plot_params['ncols'])
    nrows = math.ceil(nplots / ncols)
    ncols = int(ncols)
    nrows = int(nrows)
    nplot = 1
    plt.figure(figsize=(8 * ncols, 2 * nrows * ncols))

    # loop over models
    for mmodel in plot_params['model_filenames']:
        logger.info("Plot  tsplot data for %s, region %s", mmodel,
                    plot_params['region'])
        # load mean data created by `tsplot_data`
        ifilename_t = genfilename(cfg['work_dir'], 'thetao', mmodel,
                                  plot_params['region'], 'tsplot', '.npy')
        ifilename_s = genfilename(cfg['work_dir'], 'so', mmodel,
                                  plot_params['region'], 'tsplot', '.npy')
        ifilename_depth = genfilename(cfg['work_dir'], 'depth', mmodel,
                                      plot_params['region'], 'tsplot', '.npy')

        temp = np.load(ifilename_t, allow_pickle=True)
        salt = np.load(ifilename_s, allow_pickle=True)
        depth = np.load(ifilename_depth, allow_pickle=True)
        # Still old fashioned way to setup a plot, works best for now.
        plt.subplot(nrows, ncols, nplot)
        # calculate background with density isolines
        si2, ti2, dens = dens_back(33, 36., -2, 6)

        # convert from Kelvin if needed
        if temp.min() > 100:
            temp = temp - 273.15

        # plot the background
        contour_plot = plt.contour(si2,
                                   ti2,
                                   dens,
                                   colors='k',
                                   levels=np.linspace(dens.min(), dens.max(),
                                                      15),
                                   alpha=0.3)
        # plot the scatter plot
        plt.scatter(salt[::],
                    temp[::],
                    c=depth,
                    s=3.0,
                    cmap=plot_params['cmap'],
                    edgecolors='none',
                    vmax=cfg['tsdiag_depth'])
        # adjust the plot
        plt.clabel(contour_plot, fontsize=12, inline=1, fmt='%1.1f')
        plt.xlim(33, 36.)
        plt.ylim(-2.1, 6)
        plt.xlabel('Salinity', size=20)
        plt.ylabel(r'Temperature, $^{\circ}$C', size=20)
        plt.xticks(size=15)
        plt.yticks(size=15)
        # setup the colorbar
        colorbar = plt.colorbar(pad=0.03)
        colorbar.ax.get_yaxis().labelpad = 15
        colorbar.set_label('depth, m', rotation=270, size=20)
        colorbar.ax.tick_params(labelsize=15)

        plt.title(mmodel, size=20)
        nplot = nplot + 1

    plt.tight_layout()
    # save the plot
    pltoutname = genfilename(cfg['plot_dir'],
                             'tsplot',
                             region=plot_params['region'],
                             data_type='tsplot')
    plt.savefig(pltoutname, dpi=100)
    plot_params['basedir'] = cfg['plot_dir']
    plot_params['ori_file'] = ifilename_t
    plot_params['areacello'] = None
    plot_params['mmodel'] = None

    provenance_record = get_provenance_record(plot_params, 'tsplot', 'png')
    with ProvenanceLogger(cfg) as provenance_logger:
        provenance_logger.log(pltoutname + '.png', provenance_record)
Example #41
# "nonparametric" method)
# this produces a noise field having spatial correlation structure similar to
# the input field
init_noise, generate_noise = stp.noise.get_method(noise_method)
F = init_noise(R_)

# plot four realizations of the stochastic noise
nrows = int(np.ceil((1 + num_realizations) / 4.))
plt.subplot(nrows, 4, 1)
for k in range(num_realizations + 1):
    if k == 0:
        plt.subplot(nrows, 4, k + 1)
        stp.plt.plot_precip_field(R,
                                  units=metadata["unit"],
                                  title="Rainfall field",
                                  colorbar=False)
    else:
        ## generate the noise
        N = generate_noise(F, seed=seed + k)

        ## if necessary, reshape to original domain
        N, _ = reshaper(N, metadata_, inverse=True)

        plt.subplot(nrows, 4, k + 1)
        plt.imshow(N, cmap=cm.jet)
        plt.xticks([])
        plt.yticks([])
        plt.title("Noise field %d" % (k + 1))

plt.show()
Example #42
import cv2
import numpy as np
import matplotlib.pylab as plt


img = cv2.imread('../img/scaned_paper.jpg', cv2.IMREAD_GRAYSCALE) 
_, t_130 = cv2.threshold(img, 130, 255, cv2.THRESH_BINARY)        

t, t_otsu = cv2.threshold(img, -1, 255,  cv2.THRESH_BINARY | cv2.THRESH_OTSU) 
print('otsu threshold:', t)

imgs = {'Original': img, 't:130':t_130, 'otsu:%d'%t: t_otsu}
for i , (key, value) in enumerate(imgs.items()):
    plt.subplot(1, 3, i+1)
    plt.title(key)
    plt.imshow(value, cmap='gray')
    plt.xticks([]); plt.yticks([])

plt.show()
Example #43
ts_log_diff1 = ts_log.diff(1)
ts_log_diff2 = ts_log.diff(2)
ts_log_diff1.plot()
ts_log_diff2.plot()
plt.show()

# decompose the series
decomposition = seasonal_decompose(ts)

trend = decomposition.trend  # trend component
seasonal = decomposition.seasonal  # seasonal component
plt.plot(seasonal['2018-01-01':'2018-01-31'])
ax = plt.gca()
ax.xaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d'))  # set the date label display format
plt.xticks(pd.date_range('2018-01-01', '2018-01-31', freq='1D'))
plt.gcf().autofmt_xdate()
plt.show()
residual = decomposition.resid  # residual component

plt.subplot(411)
plt.plot(ts, label='Original')
plt.legend(loc='best')
plt.subplot(412)
plt.plot(trend, label='Trend')
plt.legend(loc='best')
plt.subplot(413)
plt.plot(seasonal, label='Seasonality')
plt.legend(loc='best')
plt.subplot(414)
plt.plot(residual, label='Residual')
Example #44
0
# (The opening of this snippet is missing; the setup below is a reconstruction
# consistent with the code that follows: an elbow-method scan over cluster
# counts, accumulating within-cluster euclidean distances. It assumes
# `from sklearn.cluster import KMeans` and
# `from scipy.spatial.distance import cdist`.)
TWSS = []
k = list(range(2, 9))
for i in k:
    kmeans = KMeans(n_clusters=i)
    kmeans.fit(airline_norm)
    WSS = []
    for j in range(i):
        WSS.append(
            sum(
                cdist(
                    airline_norm.iloc[kmeans.labels_ == j, :],
                    kmeans.cluster_centers_[j].reshape(1,
                                                       airline_norm.shape[1]),
                    "euclidean")))
    TWSS.append(sum(WSS))

WSS
TWSS

# Scree plot
plt.plot(k, TWSS, 'ro-')
plt.xlabel("No_of_Clusters")
plt.ylabel("total_within_SS")
plt.xticks(k)

# Selecting 5 clusters as 5 looks optimal from the above scree plot
model = KMeans(n_clusters=5)
model.fit(airline_norm)

model.labels_
md = pd.Series(model.labels_)

Airlines1 = Airlines.copy()
Airlines1['cluster_id'] = md  # creating a new column for assigning cluster values

grouped_clusters = Airlines1.iloc[:, 1:].groupby(Airlines1.cluster_id).mean()
grouped_clusters
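# Aside: scikit-learn exposes the total within-cluster *squared* distance
# directly as KMeans.inertia_ (the loop above sums unsquared euclidean
# distances, so the numbers differ while the elbow shape is similar):
# TWSS_sq = [KMeans(n_clusters=i).fit(airline_norm).inertia_ for i in k]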
Example #45
0
File: nips3mm.py Project: Gabighz/nips2015
        cur_mean = np.mean(dbg_prfs_other_ds_[-1, 2, :])
        f1_mean_per_lambda.append(cur_mean)
        cur_std = np.std(dbg_prfs_other_ds_[-1, 2, :])
        f1_std_per_lambda.append(cur_std)
        print('F1 means: %.2f +/- %.2f (SD)' % (cur_mean, cur_std))

    f1_mean_per_lambda = np.array(f1_mean_per_lambda)
    f1_std_per_lambda = np.array(f1_std_per_lambda)

    plt.figure()
    ind = np.arange(4)
    width = 1.
    colors = [#(7., 116., 242.), #(7., 176., 242.)
        #(7., 136., 217.), (7., 40., 164.), (1., 4., 64.)]
        (7., 176., 242.), (7., 136., 217.), (7., 40., 164.), (1., 4., 64.)]
    my_colors = [(x/256, y/256, z/256) for x, y, z in colors]

    plt.bar(ind, f1_mean_per_lambda, yerr=f1_std_per_lambda,
            width=width, color=my_colors)
    plt.ylabel('mean F1 score (+/- SD)')
    plt.title('out-of-dataset performance\n'
              '%i components' % n_comp)
    tick_strs = [r'low-rank $\lambda=%.2f$' % val for val in lambs]
    plt.xticks(ind + width / 2., tick_strs, rotation=320)
    plt.ylim(.5, 1.0)
    plt.grid(True)
    plt.yticks(np.linspace(0.5, 1., 11), np.linspace(0.5, 1., 11))
    plt.tight_layout()
    out_path2 = op.join(WRITE_DIR, 'f1_bars_comp=%i.png' % n_comp)
    plt.savefig(out_path2)
Example #46
0
def all_mem_transmission():

    naive = []
    shared_i = []

    x1 = []
    x2 = []

    vec1 = np.arange(2, 12, 1)

    for i in vec1:
        naive.append((3.0 * i))
        shared_i.append((i + 2.0))

    vec2 = np.arange(2, 12, 2)

    for i in vec2:
        x1.append((3.0 * i))
        x2.append((i + 2.0))

    fig = plt.figure(figsize=(8.5, 8.5))

    plt.xticks(vec2)
    ax1 = fig.add_subplot(1, 1, 1)

    # ax1.plot(vec1, naive, color='r', mew= 2, linestyle='-')
    # ax1.plot(vec1, shared_i, color='b', mew= 2, linestyle='-')

    ax1.plot(vec2, x1, '-or', label="Naive", markersize=10)
    ax1.plot(vec2, x2, '-xb', label="Shared inputs", mew=4, markersize=10)

    ax1.set_title('Storage', fontsize=20, fontweight='bold')

    for tick in ax1.xaxis.get_major_ticks():
        tick.label.set_fontsize(16)

    for tick in ax1.yaxis.get_major_ticks():
        tick.label.set_fontsize(16)

    plt.ylabel('Memory space [bytes]', fontsize=20, fontweight='bold')
    plt.xlabel('MAC Units', fontsize=20, fontweight='bold')
    plt.xlim((1.8, 10.2))
    plt.ylim((min(x2) - 0.4, max(x1) + 0.4))
    plt.grid(True)
    plt.legend(fontsize=16, loc=2)

    plt.savefig('./mem_space2.eps', bbox_inches='tight')
    plt.savefig('./mem_space2.pdf', bbox_inches='tight')
    plt.savefig('./mem_space2.jpg', bbox_inches='tight')

    c = 1600.0  # pixels per image
    k = 3.0
    y1 = []
    y2 = []
    y3 = []

    x1 = []
    x2 = []
    x3 = []

    vec1 = np.arange(2, 12, .5)
    for i in vec1:
        y1.append(k * c + 6)
        y2.append(((c - i - k + 1) / i + 1) * (i + k - 1))
        y3.append(((c - i - k + 1) / i + 1) * i)

    vec2 = [2, 4, 6, 8, 10]
    for i in vec2:
        x1.append(k * c + 6)
        x2.append(((c - i - k + 1) / i + 1) * (i + k - 1))
        x3.append(((c - i - k + 1) / i + 1) * i)

    fig = plt.figure(figsize=(8.5, 8.5))

    plt.xticks(vec2)
    ax1 = fig.add_subplot(1, 1, 1)
    # ax1.plot(vec1, y1, color='g', mew= 2, linestyle='--')
    # ax1.plot(vec1, y2, color='r', mew=0.01, linestyle='--')
    # ax1.plot(vec1, y3, color='b', mew=0.01, linestyle='--')

    ax1.plot(vec2, x1, '-og', label="naive", markersize=10)
    ax1.plot(vec2, x2, '-xr', label="shared inputs", mew=4, markersize=10)
    ax1.plot(vec2,
             x3,
             '-sb',
             label="shared inputs with circular shift",
             markersize=10)

    ax1.set_title('Transmission', fontsize=20, fontweight='bold')

    for tick in ax1.xaxis.get_major_ticks():
        tick.label.set_fontsize(16)

    for tick in ax1.yaxis.get_major_ticks():
        tick.label.set_fontsize(16)

    plt.ylabel('Data sent [Kb]', fontsize=20, fontweight='bold')
    plt.xlabel('MAC Units', fontsize=20, fontweight='bold')
    plt.xlim((1.8, 10.2))
    plt.ylim((1500, 5000))
    plt.grid(True)
    plt.legend(fontsize=16, loc=0)

    plt.savefig('./data_sent.eps', bbox_inches='tight')
    plt.savefig('./data_sent.pdf', bbox_inches='tight')
    plt.savefig('./data_sent.jpg', bbox_inches='tight')

    plt.show()
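# A quick numeric check of the cost models plotted above (assumed semantics,
# following the snippet: c = pixels per image, k = kernel width, i = MAC units):
c, k = 1600.0, 3.0
for i in (2, 4, 8):
    naive = k * c + 6
    shared = ((c - i - k + 1) / i + 1) * (i + k - 1)
    shifted = ((c - i - k + 1) / i + 1) * i
    print('MACs=%d: naive=%.0f shared=%.0f circular-shift=%.0f'
          % (i, naive, shared, shifted))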
Example #47
0
File: Churn_Fun.py Project: csjp/Python
# Print the feature ranking
print("Feature ranking:")

for f in range(10):
    print(("%d. %s (%f)" % (f + 1, df.columns[f], importances[indices[f]])))

# Plot the feature importances of the forest
#import pylab as pl
plt.figure()
plt.title("Feature importances")
plt.bar(list(range(10)),
        importances[indices],
        yerr=std[indices],
        color="r",
        align="center")
plt.xticks(list(range(10)), indices)
plt.xlim([-1, 10])
plt.show()

# %%
print("~ Transforming Data ~")
scaler = StandardScaler()
X = scaler.fit_transform(X)
print("Feature space holds %d observations and %d features" % X.shape)
print("Unique target labels:", np.unique(y))

# %%
print("~ Building K-Fold Cross-Validations ~")


def run_cv(X, y, clf):
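    # (The body is cut off in the snippet; a minimal K-fold sketch consistent
    # with the surrounding code might look like the following.)
    from sklearn.model_selection import KFold
    y_pred = y.copy()
    for train_idx, test_idx in KFold(n_splits=5, shuffle=True).split(X):
        clf.fit(X[train_idx], y[train_idx])
        y_pred[test_idx] = clf.predict(X[test_idx])
    return y_pred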
Example #48
0
def plot_results(hist_file, save_dir, mode):
    if mode == 'pi':
        hist_pi_file = hist_file
        with open(hist_pi_file, 'rb') as fp:
            hist = pickle.load(fp)

        ##- Setting up a plot for loss and training acc. graphs
        # --------------------------------------------------------
        plt.plot(hist['loss'], linewidth=2, color='b', label='Train')
        plt.plot(hist['val_loss'], linewidth=2, color='r', label='Valida.')

        plt.grid()
        # ~ plt.grid(linestyle='dotted')
        plt.grid(color='black', linestyle='--', linewidth=1)
        plt.ylabel('Loss', fontsize=18)
        plt.xlabel('Epoch', fontsize=18)
        plt.xticks(fontsize=12, rotation=0)
        plt.yticks(fontsize=12, rotation=0)
        plt.title('Loss')
        plt.legend(shadow=False, fancybox=False)
        plt.tight_layout()
        # ~ plt.show()
        plt.savefig(save_dir + 'loss_test.png')
        plt.close()

        plt.plot(hist['acc'], linewidth=3, color='b', label='Train')
        plt.plot(hist['val_acc'], linewidth=3, color='r', label='Valida.')

        plt.grid()
        plt.grid(color='black', linestyle='--', linewidth=1)
        plt.ylabel('Accuracy', fontsize=18)
        plt.xlabel('Epoch', fontsize=18)
        plt.xticks(fontsize=12, rotation=0)
        plt.yticks(fontsize=12, rotation=0)
        plt.title('Accuracy')
        plt.legend(shadow=False, fancybox=False, loc='lower right')
        plt.tight_layout()
        plt.savefig(save_dir + 'acc_test.png')
        # ~ plt.close()

    elif mode == 'json':
        hist_json_file = hist_file
        with open(hist_json_file, "r") as f:
            hist = json.load(f)

        ##- Setting up a plot for loss and training acc. graphs
        # --------------------------------------------------------

        plt.plot(np.squeeze(hist['loss']), linewidth=2, color='b', label='Train')
        plt.plot(np.squeeze(hist['val_loss']), linewidth=2, color='r', label='Valida.')

        plt.grid()
        # ~ plt.grid(linestyle='dotted')
        plt.grid(color='black', linestyle='--', linewidth=1)
        plt.ylabel('Loss', fontsize=18)
        plt.xlabel('Epoch', fontsize=18)
        plt.xticks(fontsize=12, rotation=0)
        plt.yticks(fontsize=12, rotation=0)
        plt.title('Loss')
        plt.legend(shadow=False, fancybox=False)
        plt.tight_layout()
        # ~ plt.show()
        plt.savefig(save_dir + 'loss_test.png')
        plt.close()

        plt.plot(np.squeeze(hist['acc']), linewidth=3, color='b', label='tr_bp_acc')
        plt.plot(np.squeeze(hist['val_acc']), linewidth=3, color='r', label='te_bp_acc')
        plt.plot(np.squeeze(hist['w12_acc']), linewidth=3, color='g', label='te_w12_acc')

        plt.grid()
        plt.grid(color='black', linestyle='--', linewidth=1)
        plt.ylabel('Accuracy', fontsize=18)
        plt.xlabel('Epoch', fontsize=18)
        plt.xticks(fontsize=12, rotation=0)
        plt.yticks(fontsize=12, rotation=0)
        plt.title('Accuracy')
        plt.legend(shadow=False, fancybox=False, loc='lower right')
        plt.tight_layout()
        plt.savefig(save_dir + 'acc_test.png')
        # ~ plt.close()
Example #49
0
import seaborn as sns
from matplotlib import pylab as plt
my_title = 'Single-subject prediction: %2.2f%%' % (np.mean(accs) * 100)
disp_coefs = np.squeeze(clf.coef_)
TH = 1.50
color_order = np.array(["#e74c3c"] * len(disp_coefs))
#iord = np.argsort(disp_coefs)
color_order[disp_coefs < 0] = "#3498db"
my_palette = sns.color_palette(color_order)
plt.figure()
bar_hdl = sns.barplot(
    np.array(top5_cols),
    disp_coefs,  #ax=axes[i_topic],
    palette=my_palette,
    n_boot=100,
    ci=1.0)
for item in bar_hdl.get_xticklabels():
    item.set_rotation(90)
ticks = plt.xticks()[0]
sns.despine(bottom=True)
plt.xticks(ticks, np.array(top5_cols))
plt.grid(True, alpha=0.25)
plt.ylabel(
    'Contribution to response prediction')  # (+/- bootstrapped uncertainty)

plt.title(my_title)
plt.savefig('classif_top5_barplot_.pdf', dpi=600)
plt.savefig('classif_top5_barplot_.png', dpi=600)
#plt.show()
Example #50
0
def graph_output(file_name, date_part=''):
    import matplotlib.pylab as pylab
    file_handle = open(file_name, 'r')
    file_data = file_handle.read()
    rows = file_data.split('\n')
    data = [
        [], # Concurrency level
        [], # Raw successes
        [], # Raw fails
        [], # Successes/total
        [],
        [],
    ]
    for row in rows:
        items = [ itm for itm in row.split(' ') if len(itm) > 0]
        if not 'Concurrency' in items:
            continue
        concurrency_level = int(items[3])
        totals_idx = items.index('Total')
        successes_idx = totals_idx + items[totals_idx:].index('succ') + 1
        fails_idx = totals_idx + items[totals_idx:].index('fail') + 1
        total_success = int(items[successes_idx])
        total_fails = int(items[fails_idx])
        total_no_report = 0
        if 'no_report' in items:
            total_no_report = int(items[items.index('no_report') + 1])
        data[0].append(concurrency_level)
        data[1].append(total_success)
        data[2].append(total_fails)
        data[3].append(0)
        data[4].append(total_no_report)
    for idx in range(0, len(data[0])):
        data[3][idx] = data[1][idx] * 1.0/(data[1][idx] + data[2][idx])
    indices = range(0, len(data[0]))
    pylab.bar(indices, data[3], align='center', log=False)
    pylab.xticks(indices, data[0])
    pylab.ylabel('Success Rate Fraction')
    if date_part is not None and len(date_part) > 0:
        pylab.title('LIGO Frame Read Success Rates at Rate Limit = 1 MB/s\n(%s)'%(date_part))
    else:
        pylab.title('LIGO Frame Read Success Rates at Rate Limit = 1 MB/s')
    pylab.savefig(file_name + 'linear.png', dpi=400)
    pylab.clf()
    pylab.bar(indices, data[3], align='center', log=True)
    pylab.xticks(indices, data[0])
    pylab.ylabel('Success Rate Fraction')
    if date_part is not None and len(date_part) > 0:
        pylab.title('LIGO Frame Read Success Rates at Rate Limit = 1 MB/s\n(%s)'%(date_part))
    else:
        pylab.title('LIGO Frame Read Success Rates at Rate Limit = 1 MB/s')
    pylab.savefig(file_name + 'log.png', dpi=400)

    if any(map(lambda k : k > 0, data[4])):
        pylab.clf()
        for idx in range(0, len(data[0])):
            data[3][idx] = data[1][idx] * 1.0/(data[1][idx] + data[2][idx] + data[4][idx])
        pylab.bar(indices, data[3], align='center', log=False)
        pylab.xticks(indices, data[0])
        pylab.ylabel('Success Rate Fraction')
        if date_part is not None and len(date_part) > 0:
            pylab.title('LIGO Frame Read Success Rates at Rate Limit = 1 MB/s\n(%s)'%(date_part))
        else:
            pylab.title('LIGO Frame Read Success Rates at Rate Limit = 1 MB/s')
        pylab.savefig(file_name + 'linear_noreport_fail.png', dpi=400)
        pylab.clf()
        pylab.bar(indices, data[3], align='center', log=True)
        pylab.xticks(indices, data[0])
        pylab.ylabel('Success Rate Fraction')
        if date_part is not None and len(date_part) > 0:
            pylab.title('LIGO Frame Read Success Rates at Rate Limit = 1 MB/s\n(%s)'%(date_part))
        else:
            pylab.title('LIGO Frame Read Success Rates at Rate Limit = 1 MB/s')
        pylab.savefig(file_name + 'log_noreport_fail.png', dpi=400)
        rates = [data[4][idx]/float(data[0][idx]) for idx in range(0, len(data[0]))]
        pylab.clf()
        pylab.bar(indices, rates, align='center', log=False)
        pylab.xticks(indices, data[0])
        pylab.ylabel('No Report Rate Fraction')
        pylab.title('No Report/Concurrency Level')
        pylab.savefig(file_name + 'noreport.png', dpi=400)
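# The parser above implies input rows shaped roughly like this (an
# illustrative guess, not the actual log format):
#   "Testing Concurrency level 16 ... Total succ 950 fail 50 no_report 3"
# i.e. items[3] is the concurrency level and the success/fail counts follow
# the 'Total' token.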
Example #51
0
def scatter_embeddings(
        input_df,
        embedding_columns=None,
        classes_column="true_categ",
        classes_set=None,
        color_dict=None,
        color_by_column=None,
        pltcmap='hot',
        title=None, dest_path="./outputs", savename="scatter",
        alphas=None, init_point_size=25.0, point_size_ds=0.9,
        axes_ticks=True, xmin=None, xmax=None, ymin=None, ymax=None,):
    """
    Modified from: Pooya Mobadersay.

    This function generates the scatterplots for embeddings.

    Parameters
    ----------
    input_df: Pandas DataFrame
      e.g. loaded nucleus_metadata_and_embeddings.csv file

    embedding_columns: list of strings
      list of column names of embeddings in all_results

    classes_column: string
      name of the column that contains class labels

    classes_set: list of strings
      List of desired classes to visualize (in the same order to vis.)

    color_dict: dictionary
      dictionary that maps class names to their desired colors in visualiz.

    color_by_column: str
      name of column to color by (color map intensity based on this column)

    axes_ticks: Boolean
      Whether you want to include the axes ticks in the figure or not

    title: String
      This will be the title of the figure

    dest_path: String
      The destination path to save the results

    alphas: list of floats with only two elements [a, b]
      a, b should be in [0.0, 1.0] interval
      This defines the desired alpha channels' interval across all the classes.
      The first value will be assigned to the first class and the last value
      will be assigned to the last class; any class in between will get alphas
      with constant difference inside the interval.

      if a>b then the alphas will decrease from the first class to the end
      if a<b then the alphas will increase from the first class to the end
      if a==b then the alphas will be constant from the first class to the end

      Lower alpha means more transparency.

    init_point_size: Float
      The initial data point size assigned to the first class

    point_size_ds: Float
      Point size Down Scaling factor, the point sizes will decrease with this
      scale from one class to other. If you want to have same data point sizes
      across all classes make this value 1.0


    Example
    ----------
    color_dict = {'tumor_nonMitotic': [255, 0, 0],
                  'tumor_mitotic': [255, 191, 0],
                  'nonTILnonMQ_stromal': [0, 230, 77],
                  'macrophage': [51, 102, 153],
                  'lymphocyte': [0, 0, 255],
                  'plasma_cell': [0, 255, 255],
                  'other_nucleus': [0, 0, 0],
                  'AMBIGUOUS': [80, 80, 80]}
    input_df = pd.read_csv("./all_results.csv")
    scatter_embeddings(input_df, color_dict=color_dict)
    """
    assert color_dict or color_by_column
    embedding_columns = embedding_columns or ['embedding_0', 'embedding_1']
    classes_set = classes_set or [
        'macrophage',
        'tumor_nonMitotic',
        'lymphocyte',
        'tumor_mitotic',
        'nonTILnonMQ_stromal',
        'plasma_cell'
    ]
    alphas = alphas or [0.8, 0.4]

    plt.figure(figsize=(7,7))

    embedding = input_df.loc[:, embedding_columns].values
    classes = input_df.loc[:, classes_column].values

    # normalize the specific feature to be used for coloring/alpha
    if color_by_column is not None:
        colby = input_df.loc[:, color_by_column].values
        colby = (colby - np.nanmean(colby)) / np.nanstd(colby)
        colby -= np.nanmin(colby)
        colby /= np.nanmax(colby)
    else:
        colby = np.ones((input_df.shape[0],), dtype='float')

    # determine colors
    if color_dict is not None:
        # different colors for different nucleus classes
        color_map = np.float32(input_df.loc[:, classes_column].map(color_dict).tolist())
        color_map /= 255.
        color_map = np.concatenate((color_map, colby[:, None]), axis=1)
    else:
        # the color itself is determined by a specific feature
        from matplotlib import cm
        cmp = cm.get_cmap(pltcmap, 100)
        color_map = cmp(colby)

    # defined to make the points more transparent for overlayed scatterplots
    alphas = np.linspace(alphas[0], alphas[1], len(classes_set))
    point_size = init_point_size
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    x = embedding[:, 0]
    y = embedding[:, 1]
    for (i, cla) in enumerate(classes_set):
        keep = classes == cla
        colors = color_map[keep]
        colors[:, 3] = colors[:, 3] * alphas[i]
        plt.scatter(
            x[keep], y[keep], c=colors, label=cla,
            s=point_size, edgecolors='none')
        point_size = point_size_ds * point_size

    if not axes_ticks:
        plt.xticks([], [])
        plt.yticks([], [])
    if all([j is not None for j in (xmin, xmax, ymin, ymax)]):
        plt.xlim(xmin, xmax)
        plt.ylim(ymin, ymax)
    # plt.xlabel(embedding_columns[0])
    # plt.ylabel(embedding_columns[1])
    plt.title(label=title, fontsize=14, fontweight='bold')
    # if color_dict is not None:
    #     plt.legend()

    plt.savefig(opj(dest_path, f'{savename}_{color_by_column}.png'))
    # plt.savefig(opj(dest_path, f'{savename}_{color_by_column}.svg'))
    plt.close()
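# Hedged usage sketch (the file and column names are taken from the
# docstring above; color_dict is the one shown in the docstring example):
# import pandas as pd
# df = pd.read_csv('nucleus_metadata_and_embeddings.csv')
# scatter_embeddings(
#     df, embedding_columns=['embedding_0', 'embedding_1'],
#     classes_column='true_categ', color_dict=color_dict,
#     title='Nucleus embeddings', savename='scatter')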
Example #52
0
def select_windows(data_trace, synthetic_trace, event_latitude,
                   event_longitude, event_depth_in_km,
                   station_latitude, station_longitude, minimum_period,
                   maximum_period,
                   min_cc=0.10, max_noise=0.10, max_noise_window=0.4,
                   min_velocity=2.4, threshold_shift=0.30,
                   threshold_correlation=0.75, min_length_period=1.5,
                   min_peaks_troughs=2, max_energy_ratio=10.0,
                   min_envelope_similarity=0.2,
                   verbose=False, plot=False):
    """
    Window selection algorithm for picking windows suitable for misfit
    calculation based on phase differences.

    Returns a list of windows which might be empty due to various reasons.

    This function is really long and does a lot of things. For a more
    detailed description, please see the LASIF paper.

    :param data_trace: The data trace.
    :type data_trace: :class:`~obspy.core.trace.Trace`
    :param synthetic_trace: The synthetic trace.
    :type synthetic_trace: :class:`~obspy.core.trace.Trace`
    :param event_latitude: The event latitude.
    :type event_latitude: float
    :param event_longitude: The event longitude.
    :type event_longitude: float
    :param event_depth_in_km: The event depth in km.
    :type event_depth_in_km: float
    :param station_latitude: The station latitude.
    :type station_latitude: float
    :param station_longitude: The station longitude.
    :type station_longitude: float
    :param minimum_period: The minimum period of the data in seconds.
    :type minimum_period: float
    :param maximum_period: The maximum period of the data in seconds.
    :type maximum_period: float
    :param min_cc: Minimum normalised correlation coefficient of the
        complete traces.
    :type min_cc: float
    :param max_noise: Maximum relative noise level for the whole trace.
        Measured from maximum amplitudes before and after the first arrival.
    :type max_noise: float
    :param max_noise_window: Maximum relative noise level for individual
        windows.
    :type max_noise_window: float
    :param min_velocity: All arrivals later than those corresponding to the
        threshold velocity [km/s] will be excluded.
    :type min_velocity: float
    :param threshold_shift: Maximum allowable time shift within a window,
        as a fraction of the minimum period.
    :type threshold_shift: float
    :param threshold_correlation: Minimum normalised correlation coefficient
        within a window.
    :type threshold_correlation: float
    :param min_length_period: Minimum length of the time windows relative to
        the minimum period.
    :type min_length_period: float
    :param min_peaks_troughs: Minimum number of extrema in an individual
        time window (excluding the edges).
    :type min_peaks_troughs: float
    :param max_energy_ratio: Maximum energy ratio between data and
        synthetics within a time window. Don't make this too small!
    :type max_energy_ratio: float
    :param min_envelope_similarity: The minimum similarity of the envelopes of
        both data and synthetics. This essentially assures that the
        amplitudes of data and synthetics cannot diverge too much within a
        window. It is a bit like the inverse of the ratio of both envelopes,
        so a value of 0.2 makes sure neither amplitude can be more than 5
        times larger than the other.
    :type min_envelope_similarity: float
    :param verbose: No output by default.
    :type verbose: bool
    :param plot: Create a plot of the algorithm while it does its work.
    :type plot: bool
    """
    # Shortcuts to frequently accessed variables.
    data_starttime = data_trace.stats.starttime
    data_delta = data_trace.stats.delta
    dt = data_trace.stats.delta
    npts = data_trace.stats.npts
    synth = synthetic_trace.data
    data = data_trace.data
    times = data_trace.times()

    # Fill cache if necessary.
    if not TAUPY_MODEL_CACHE:
        from obspy.taup import TauPyModel  # NOQA
        TAUPY_MODEL_CACHE["model"] = TauPyModel("AK135")
    model = TAUPY_MODEL_CACHE["model"]

    # -------------------------------------------------------------------------
    # Geographical calculations and the time of the first arrival.
    # -------------------------------------------------------------------------
    dist_in_deg = geodetics.locations2degrees(station_latitude,
                                              station_longitude,
                                              event_latitude, event_longitude)
    dist_in_km = geodetics.calc_vincenty_inverse(
        station_latitude, station_longitude, event_latitude,
        event_longitude)[0] / 1000.0

    # Get only a couple of P phases which should be the first arrival
    # for every epicentral distance. It's quite a bit faster than calculating
    # the arrival times for every phase.
    # Assumes the first sample is the centroid time of the event.
    tts = model.get_travel_times(source_depth_in_km=event_depth_in_km,
                                 distance_in_degree=dist_in_deg,
                                 phase_list=["ttp"])
    # Sort just as a safety measure.
    tts = sorted(tts, key=lambda x: x.time)
    first_tt_arrival = tts[0].time

    # -------------------------------------------------------------------------
    # Window settings
    # -------------------------------------------------------------------------
    # Number of samples in the sliding window. Currently, the length of the
    # window is set to a multiple of the dominant period of the synthetics.
    # Make sure it is an uneven number; just to have a trivial midpoint
    # definition and one sample does not matter much in any case.
    window_length = int(round(float(2 * minimum_period) / dt))
    if not window_length % 2:
        window_length += 1

    # Use a Hanning window. No particular reason for it but it's a well-behaved
    # window and has nice spectral properties.
    taper = np.hanning(window_length)

    # =========================================================================
    # check if whole seismograms are sufficiently correlated and estimate
    # noise level
    # =========================================================================

    # Overall Correlation coefficient.
    norm = np.sqrt(np.sum(data ** 2)) * np.sqrt(np.sum(synth ** 2))
    cc = np.sum(data * synth) / norm
    if verbose:
        _log_window_selection(data_trace.id,
                              "Correlation Coefficient: %.4f" % cc)

    # Estimate noise level from waveforms prior to the first arrival.
    idx_end = int(np.ceil((first_tt_arrival - 0.5 * minimum_period) / dt))
    idx_end = max(10, idx_end)
    idx_start = int(np.ceil((first_tt_arrival - 2.5 * minimum_period) / dt))
    idx_start = max(10, idx_start)

    if idx_start >= idx_end:
        idx_start = max(0, idx_end - 10)

    abs_data = np.abs(data)
    noise_absolute = abs_data[idx_start:idx_end].max()
    noise_relative = noise_absolute / abs_data.max()

    if verbose:
        _log_window_selection(data_trace.id,
                              "Absolute Noise Level: %e" % noise_absolute)
        _log_window_selection(data_trace.id,
                              "Relative Noise Level: %e" % noise_relative)

    # Basic global rejection criteria.
    accept_traces = True
    if (cc < min_cc) and (noise_relative > max_noise / 3.0):
        msg = "Correlation %.4f is below threshold of %.4f" % (cc, min_cc)
        if verbose:
            _log_window_selection(data_trace.id, msg)
        accept_traces = msg

    if noise_relative > max_noise:
        msg = "Noise level %.3f is above threshold of %.3f" % (
            noise_relative, max_noise)
        if verbose:
            _log_window_selection(
                data_trace.id, msg)
        accept_traces = msg

    # Calculate the envelope of both data and synthetics. This is to make sure
    # that the amplitude of both is not too different over time and is
    # used as another selector. Only calculated if the trace is generally
    # accepted as it is fairly slow.
    if accept_traces is True:
        data_env = obspy.signal.filter.envelope(data)
        synth_env = obspy.signal.filter.envelope(synth)

    # -------------------------------------------------------------------------
    # Initial Plot setup.
    # -------------------------------------------------------------------------
    # All the plot calls are interleaved. I realize this is really ugly but
    # the alternative would be to either have two functions (one with plots,
    # one without) or split the plotting function in various subfunctions,
    # neither of which are acceptable in my opinion. The impact on
    # performance is minimal if plotting is turned off: all imports are lazy
    # and a couple of conditionals are cheap.
    if plot:
        import matplotlib.pylab as plt  # NOQA
        import matplotlib.patheffects as PathEffects  # NOQA

        if accept_traces is True:
            plt.figure(figsize=(18, 12))
            plt.subplots_adjust(left=0.05, bottom=0.05, right=0.98, top=0.95,
                                wspace=None, hspace=0.0)
            grid = (31, 1)

            # Axes showing the data.
            data_plot = plt.subplot2grid(grid, (0, 0), rowspan=8)
        else:
            # Only show one axis if the traces are not accepted.
            plt.figure(figsize=(18, 3))

        # Plot envelopes if needed.
        if accept_traces is True:
            plt.plot(times, data_env, color="black", alpha=0.5, lw=0.4,
                     label="data envelope")
            plt.plot(synthetic_trace.times(), synth_env, color="#e41a1c",
                     alpha=0.4, lw=0.5, label="synthetics envelope")

        plt.plot(times, data, color="black", label="data", lw=1.5)
        plt.plot(synthetic_trace.times(), synth, color="#e41a1c",
                 label="synthetics", lw=1.5)

        # Symmetric around y axis.
        middle = data.mean()
        d_max, d_min = data.max(), data.min()
        r = max(d_max - middle, middle - d_min) * 1.1
        ylim = (middle - r, middle + r)
        xlim = (times[0], times[-1])
        plt.ylim(*ylim)
        plt.xlim(*xlim)

        offset = (xlim[1] - xlim[0]) * 0.005
        plt.vlines(first_tt_arrival, ylim[0], ylim[1], colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival + offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "first arrival", verticalalignment="top",
                 horizontalalignment="left", color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        plt.vlines(first_tt_arrival - minimum_period / 2.0, ylim[0], ylim[1],
                   colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival - minimum_period / 2.0 - offset,
                 ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                 "first arrival - min period / 2", verticalalignment="bottom",
                 horizontalalignment="right", color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        for velocity in [6, 5, 4, 3, min_velocity]:
            tt = dist_in_km / velocity
            plt.vlines(tt, ylim[0], ylim[1], colors="gray", lw=2)
            if velocity == min_velocity:
                hal = "right"
                o_s = -1.0 * offset
            else:
                hal = "left"
                o_s = offset
            plt.text(tt + o_s, ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                     str(velocity) + " km/s", verticalalignment="bottom",
                     horizontalalignment=hal, color="0.15")
        plt.vlines(dist_in_km / min_velocity + minimum_period / 2.0,
                   ylim[0], ylim[1], colors="gray", lw=2)
        plt.text(dist_in_km / min_velocity + minimum_period / 2.0 - offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "min surface velocity + min period / 2",
                 verticalalignment="top",
                 horizontalalignment="right", color="0.15", path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        plt.hlines(noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.hlines(-noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.text(offset, noise_absolute + (ylim[1] - ylim[0]) * 0.01,
                 "noise level", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")
        plt.gca().xaxis.set_ticklabels([])

        # Plot the basic global information.
        ax = plt.gca()
        txt = (
            "Total CC Coeff: %.4f\nAbsolute Noise: %e\nRelative Noise: %.3f"
            % (cc, noise_absolute, noise_relative))
        ax.text(0.01, 0.95, txt, transform=ax.transAxes,
                fontdict=dict(fontsize="small", ha='left', va='top'),
                bbox=dict(boxstyle="round", fc="w", alpha=0.8))
        plt.suptitle("Channel %s" % data_trace.id, fontsize="larger")

        # Show plot and return if not accepted.
        if accept_traces is not True:
            txt = "Rejected: %s" % (accept_traces)
            ax.text(0.99, 0.95, txt, transform=ax.transAxes,
                    fontdict=dict(fontsize="small", ha='right', va='top'),
                    bbox=dict(boxstyle="round", fc="red", alpha=1.0))
            plt.show()
    if accept_traces is not True:
        return []

    # Initialise masked arrays. The mask will be set to True where no
    # windows are chosen.
    time_windows = np.ma.ones(npts)
    time_windows.mask = False
    if plot:
        old_time_windows = time_windows.copy()

    # Elimination Stage 1: Eliminate everything up to half a period before
    # the first theoretical arrival and everything after the arrival
    # corresponding to the minimum velocity, plus half a period.
    min_idx = int((first_tt_arrival - (minimum_period / 2.0)) / dt)
    max_idx = int(math.ceil((
        dist_in_km / min_velocity + minimum_period / 2.0) / dt))
    time_windows.mask[:min_idx + 1] = True
    time_windows.mask[max_idx:] = True
    if plot:
        plt.subplot2grid(grid, (8, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TRAVELTIME ELIMINATION")
        old_time_windows = time_windows.copy()

    # -------------------------------------------------------------------------
    # Compute sliding time shifts and correlation coefficients for time
    # frames that passed the traveltime elimination stage.
    # -------------------------------------------------------------------------
    # Allocate arrays to collect the time dependent values.
    sliding_time_shift = np.ma.zeros(npts, dtype="float32")
    sliding_time_shift.mask = True
    max_cc_coeff = np.ma.zeros(npts, dtype="float32")
    max_cc_coeff.mask = True

    for start_idx, end_idx, midpoint_idx in _window_generator(npts,
                                                              window_length):
        if not min_idx < midpoint_idx < max_idx:
            continue

        # Slice windows. Create a copy to be able to taper without affecting
        # the original time series.
        data_window = data[start_idx: end_idx].copy() * taper
        synthetic_window = \
            synth[start_idx: end_idx].copy() * taper

        # Elimination Stage 2: Skip windows that have essentially no energy
        # to avoid instabilities. No windows can be picked in these.
        if synthetic_window.ptp() < synth.ptp() * 0.001:
            time_windows.mask[midpoint_idx] = True
            continue

        # Calculate the time shift. Here this is defined as the shift of the
        # synthetics relative to the data. So a value of 2, for instance, means
        # that the synthetics are 2 timesteps later than the data.
        cc = np.correlate(data_window, synthetic_window, mode="full")

        time_shift = cc.argmax() - window_length + 1
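        # (np.correlate with mode="full" returns 2 * window_length - 1 lags,
        # with zero lag at index window_length - 1, so the subtraction above
        # converts the argmax position into a shift in samples.)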
        # Express the time shift in fraction of the minimum period.
        sliding_time_shift[midpoint_idx] = (time_shift * dt) / minimum_period

        # Normalized cross correlation.
        max_cc_value = cc.max() / np.sqrt((synthetic_window ** 2).sum() *
                                          (data_window ** 2).sum())
        max_cc_coeff[midpoint_idx] = max_cc_value

    if plot:
        plt.subplot2grid(grid, (9, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="NO ENERGY IN CC WINDOW")
        # Axes with the CC coeffs
        plt.subplot2grid(grid, (15, 0), rowspan=4)
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.hlines(-threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.hlines(threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, -threshold_shift - (2) * 0.03,
                 "threshold", verticalalignment="top",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, sliding_time_shift, color="#377eb8",
                 label="Time shift in fraction of minimum period", lw=1.5)
        ylim = plt.ylim()
        plt.yticks([-0.75, 0, 0.75])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.ylim(ylim[0], ylim[1] + ylim[1] - ylim[0])
        plt.ylim(-1.0, 1.0)
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

        plt.subplot2grid(grid, (10, 0), rowspan=4)
        plt.hlines(threshold_correlation, xlim[0], xlim[1], color="0.15",
                   linestyle="--")
        plt.hlines(1, xlim[0], xlim[1], color="lightgray")
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.text(5, threshold_correlation + (1.4) * 0.01,
                 "threshold", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, max_cc_coeff, color="#4daf4a",
                 label="Maximum CC coefficient", lw=1.5)
        plt.ylim(-0.2, 1.2)
        plt.yticks([0, 0.5, 1])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # Elimination Stage 3: Mark all areas where the normalized cross
    # correlation coefficient is under threshold_correlation as negative
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[max_cc_coeff < threshold_correlation] = True
    if plot:
        plt.subplot2grid(grid, (14, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="CORRELATION COEFF THRESHOLD ELIMINATION")

    # Elimination Stage 4: Mark everything with an absolute travel time
    # shift of more than # threshold_shift times the dominant period as
    # negative
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[np.ma.abs(sliding_time_shift) > threshold_shift] = True
    if plot:
        plt.subplot2grid(grid, (19, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT THRESHOLD ELIMINATION")

    # Elimination Stage 5: Mark the area around every "travel time shift
    # jump" (based on the traveltime time difference) negative. The width of
    # the area is currently chosen to be a tenth of a dominant period to
    # each side.
    if plot:
        old_time_windows = time_windows.copy()
    sample_buffer = int(np.ceil(minimum_period / dt * 0.1))
    indices = np.ma.where(np.ma.abs(np.ma.diff(sliding_time_shift)) > 0.1)[0]
    for index in indices:
        time_windows.mask[index - sample_buffer: index + sample_buffer] = True
    if plot:
        plt.subplot2grid(grid, (20, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT JUMPS ELIMINATION")

    # Clip both to avoid large numbers by division.
    stacked = np.vstack([
        np.ma.clip(synth_env, synth_env.max() * min_envelope_similarity * 0.5,
                   synth_env.max()),
        np.ma.clip(data_env, data_env.max() * min_envelope_similarity * 0.5,
                   data_env.max())])
    # Ratio.
    ratio = stacked.min(axis=0) / stacked.max(axis=0)

    # Elimination Stage 6: Make sure the amplitudes of both don't vary too
    # much.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[ratio < min_envelope_similarity] = True
    if plot:
        plt.subplot2grid(grid, (25, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="ENVELOPE AMPLITUDE SIMILARITY ELIMINATION")

    if plot:
        plt.subplot2grid(grid, (21, 0), rowspan=4)
        plt.hlines(min_envelope_similarity, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, min_envelope_similarity + (2) * 0.03,
                 "threshold", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                 PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, ratio, color="#9B59B6",
                 label="Envelope amplitude similarity", lw=1.5)
        plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
        plt.ylim(0.05, 1.05)
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # First minimum window length elimination stage. This is cheap and if
    # not done it can easily destabilize the peak-and-trough marching stage
    # which would then have to deal with way more edge cases.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (26, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 1")

    # -------------------------------------------------------------------------
    # Peak and trough marching algorithm
    # -------------------------------------------------------------------------
    final_windows = []
    for i in flatnotmasked_contiguous(time_windows):
        # Cut respective windows.
        window_npts = i.stop - i.start
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]

        # Find extrema in the data and the synthetics.
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)

        window_mask = np.ones(window_npts, dtype="bool")

        closest_peaks = find_closest(data_p, synth_p)
        diffs = np.diff(closest_peaks)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_p[idx - 1]
            else:
                start = 0
            if idx < (len(synth_p) - 1):
                end = synth_p[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        closest_troughs = find_closest(data_t, synth_t)
        diffs = np.diff(closest_troughs)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_t[idx - 1]
            else:
                start = 0
            if idx < (len(synth_t) - 1):
                end = synth_t[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        window_mask = np.ma.masked_array(window_mask,
                                         mask=window_mask)

        if window_mask.mask.all():
            continue

        for j in flatnotmasked_contiguous(window_mask):
            final_windows.append((i.start + j.start, i.start + j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False
    if plot:
        plt.subplot2grid(grid, (27, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK AND TROUGH MARCHING ELIMINATION")

    # Loop through all the time windows, remove windows not satisfying the
    # minimum number of peaks and troughs per window. Acts mainly as a
    # safety guard.
    old_time_windows = time_windows.copy()
    for i in flatnotmasked_contiguous(old_time_windows):
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)
        if np.min([len(synth_p), len(synth_t), len(data_p), len(data_t)]) < \
                min_peaks_troughs:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (28, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK/TROUGH COUNT ELIMINATION")

    # Second minimum window length elimination stage.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (29, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 2")

    # Final step, eliminating windows with little energy.
    final_windows = []
    for j in flatnotmasked_contiguous(time_windows):
        # Again assert a certain minimal length.
        if (j.stop - j.start) < min_length:
            continue

        # Compare the energy in the data window and the synthetic window.
        data_energy = (data[j.start: j.stop] ** 2).sum()
        synth_energy = (synth[j.start: j.stop] ** 2).sum()
        energies = sorted([data_energy, synth_energy])
        if energies[1] > max_energy_ratio * energies[0]:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to energy ratio between "
                    "data and synthetics.")
            continue

        # Check that amplitudes in the data are above the noise
        if noise_absolute / data[j.start: j.stop].ptp() > \
                max_noise_window:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to having no amplitude above "
                    "the signal to noise ratio.")
            continue
        final_windows.append((j.start, j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False

    if plot:
        plt.subplot2grid(grid, (30, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="LITTLE ENERGY ELIMINATION")

    if verbose:
        _log_window_selection(
            data_trace.id,
            "Done, Selected %i window(s)" % len(final_windows))

    # Final step is to convert the index value windows to actual times.
    windows = []
    for start, stop in final_windows:
        start = data_starttime + start * data_delta
        stop = data_starttime + stop * data_delta
        windows.append((start, stop))

    if plot:
        # Plot the final windows to the data axes.
        import matplotlib.transforms as mtransforms  # NOQA
        ax = data_plot
        trans = mtransforms.blended_transform_factory(ax.transData,
                                                      ax.transAxes)
        for start, stop in final_windows:
            ax.fill_between([start * data_delta, stop * data_delta], 0, 1,
                            facecolor="#CDDC39", alpha=0.5, transform=trans)

        plt.show()

    return windows
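# Numeric illustration of the min_envelope_similarity criterion described in
# the docstring: the ratio of the smaller to the larger (clipped) envelope
# must stay above the threshold, so 0.2 caps the amplitude mismatch at 5x.
for data_amp, synth_amp in [(1.0, 4.0), (1.0, 6.0)]:
    ratio = min(data_amp, synth_amp) / max(data_amp, synth_amp)
    print(data_amp, synth_amp, ratio >= 0.2)  # a 4x mismatch passes, 6x fails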
Example #53
0
    def plotCorrelations(self):
        """
        plot the covariances/correlations
        
        normalize_indices: string of index letters used to select which dimension to normalize
            'rgsd': all indices (correlation matrix)
            '': verbatim covariance matrix
            'rg': variances between realms and between derivatives are normalized (default)
        """
        cov = self.tns[self.covariancesName].data
        sigmas = _np.sqrt(self.tns[self.covariancesName].data_diagonal)
        title = "Correlations"

        sigmamax_inv = 1.0 / _np.clip(sigmas, 1e-6, _np.inf)
        cov_scaled = sigmamax_inv[:, :, :, None, None,
                                  None] * cov * sigmamax_inv[None, None,
                                                             None, :, :, :]
        vmax = _np.max(cov_scaled)

        len_r = self.tns['r'].size
        len_d = self.tns['d'].size
        len_g = self.tns['g'].size
        len_all = len_r * len_g * len_d

        cov_reordered = _np.transpose(cov_scaled,
                                      axes=(0, 1, 2, 0 + 3, 1 + 3,
                                            2 + 3))  #to srgd
        image = _np.reshape(cov_reordered, (len_all, len_all))
        gridvectorX = _np.arange(0, len_all, 1)
        gridvectorY = _np.arange(len_all, 0, -1)

        fig = _plt.figure(figsize=(3.4, 3.4))
        _plt.pcolor(gridvectorX,
                    gridvectorY,
                    image,
                    cmap=_cmapCorrelations,
                    vmin=-vmax,
                    vmax=vmax)

        _plt.axis([0, image.shape[0], 0, image.shape[1]])
        _plt.gca().set_aspect('equal', 'box')

        line_positions = _np.reshape(_np.arange(len_all),
                                     cov_reordered.shape[:3])
        for r_idx in range(len_r):
            for g_idx in range(len_g):
                for d_idx in range(len_d):
                    linewidth = 0.5
                    linestyle = '-'
                    if g_idx != 0:
                        linewidth = 0.2
                        linestyle = ':'
                    if d_idx != 0:
                        linewidth = 0.0
                    if linewidth > 0.0:
                        _plt.axhline(line_positions[r_idx, g_idx, d_idx],
                                     color='k',
                                     linewidth=linewidth,
                                     linestyle=linestyle)
                        _plt.axvline(line_positions[r_idx, g_idx, d_idx],
                                     color='k',
                                     linewidth=linewidth,
                                     linestyle=linestyle)

        baselength = len_d
        ticklabels = []
        ticks = []
        offsets = []
        for r in range(len_r):
            for g2 in range(2 * len_g):
                ticks.append(((r) * len_r + g2 / 2) * baselength)
                g2mod2 = g2 % 2
                if g2mod2 == 1:
                    ticklabels.append(g2 // 2)
                    offsets.append(0.0)
                else:
                    ticklabels.append(r)
                    offsets.append(baselength)
        for tick, label, offset in zip(ticks, ticklabels, offsets):
            t = _plt.text(
                offset, tick, label, {
                    'verticalalignment': 'center',
                    'horizontalalignment': 'right',
                    'size': 'xx-small'
                })
        _plt.yticks([])
        _plt.text(0.0,
                  ticks[0] + 0.3 * baselength,
                  "$g$",
                  fontdict={
                      'verticalalignment': 'bottom',
                      'horizontalalignment': 'right',
                      'size': 'small'
                  })
        _plt.text(-10.0,
                  ticks[0] + 0.3 * baselength,
                  "$r$",
                  fontdict={
                      'verticalalignment': 'bottom',
                      'horizontalalignment': 'right',
                      'size': 'small'
                  })

        #        #ticks in x:
        #        ticks = range( (len_dtilde)//2, len_all, (len_dtilde))
        #        ticklabels = []
        #        ticks=[]
        #        offsets=[]
        #        for s in range(len_stilde):
        #            for r in range(len_rtilde):
        #                for g in range(len_gtilde):
        #                    ticks.append( (((s)*len_rtilde + r)*len_gtilde + g)*len_dtilde + len_dtilde/2 )
        #                    ticklabels.append(g)
        #                    offsets.append(-1.0)
        #        for tick, label, offset in zip(ticks, ticklabels, offsets):
        #            t = _plt.text(tick, offset, label, fontdict={'verticalalignment':'top', 'horizontalalignment':'center', 'size':'xx-small'}, rotation=0)
        #        _plt.text(ticks[-1]+10, 0.0, "$\widetilde{g}$", fontdict={'verticalalignment':'top', 'horizontalalignment':'left', 'size':'small'})

        _plt.xticks([])

        _plt.colorbar(shrink=0.6,
                      aspect=40,
                      ticks=[-vmax, 0, vmax],
                      fraction=0.08)
        _plt.title(title)
        ax = _plt.gca()
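# The scaling above is the standard covariance-to-correlation normalization;
# in plain 2-D matrix form the same operation looks like this (a sketch):
import numpy as np
cov2d = np.array([[4.0, 1.2], [1.2, 9.0]])
sigma_inv = 1.0 / np.sqrt(np.diag(cov2d))
corr2d = sigma_inv[:, None] * cov2d * sigma_inv[None, :]  # unit diagonal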
Example #54
0
    fig_size = [8, 6]
    params = {
        'backend': 'ps',
        'axes.labelsize': 11,
        'font.size': 11,
        'font.weight': 'bold',
        'legend.fontsize': 8,
        'font.family': 'serif',
        'lines.markersize': 4,
        'axes.labelweight': 'bold',
        'xtick.labelsize': 8,
        'ytick.labelsize': 8,
        'figure.figsize': fig_size,
        'savefig.dpi': 600,
        'ps.papersize': 'auto'
    }
    plt.rcParams.update(params)

    # Plot the graph using matplotlib
    ax = plt.gca()
    xfmt = md.DateFormatter('%Y-%m-%d %H:%M')
    ax.xaxis.set_major_formatter(xfmt)
    plt.xticks(rotation=10)

    plt.title('Temperature Plot for sensor ' + sensor)
    plt.xlabel('Timestamp')
    plt.ylabel('Temperature')
    plt.plot(timestamp, temperature, 'g-')
    plt.show()
Example #55
0
    for r, rho in enumerate(rhos):
        data = np.loadtxt(formato % rho)
        Ek[r] = data[:, 1]
        Ep[r] = data[:, 3]
    Ts = data[:, 0]
    Ek = np.array(Ek)
    Ep = np.array(Ep)

    Ek_fermi = kinetic_energy(rhos / 4, Ts)
    plt.figure()
    plt.plot(rhos, Ek[:, -1], "ko--")
    plt.plot(rhos, 0.6 * Ef_rho * (rhos / 4)**(2 / 3), "k-")
    plt.xlabel(r"$\rho$ [fm$^{-3}$]", fontsize=18)
    plt.ylabel("$E_k$ [MeV]", fontsize=18)
    plt.axis([0.03, 0.25, 0, 30])
    plt.xticks(rhos)
    plt.yticks(np.concatenate([Ek[:, -1], 0.6 * Ef_rho * (rhos / 4)**(2 / 3)]))
    plt.grid()

    plt.figure()
    colores = ["y", "r", "b", "k", "c", "g"]
    for i in range(n_rhos):
        plt.plot(Ts[:-20], Ek[i, :-20], colores[i] + "o--")
    for i in range(n_rhos):
        plt.plot(Ts[:-20], Ek_fermi[i, :-20], colores[i] + "-", linewidth=5)
        plt.text(-0.12,
                 Ek_fermi[i, -20],
                 r"$%.2f$fm$^{-3}$" % rhos[i],
                 color=colores[i],
                 fontsize=14)
    plt.axis([-0.15, 4, 5, 35])
Example #56
0
# Second row middle
pylab.subplot(4, 7, 11)
pylab.title('Softmax Predictions for Correctly Classified Samples',fontsize=24)

# For 7 steps
for sub_i in range(7):
    # Draw the top row (digit images)
    pylab.subplot(4, 7, sub_i + 1)        
    pylab.imshow(np.squeeze(correctly_predicted[sub_i]),cmap='gray')    
    pylab.axis('off')
    
    # Draw the second row (prediction bar chart)
    pylab.subplot(4, 7, 7 + sub_i + 1)        
    pylab.bar(x_axis + padding, correct_predictions[sub_i], width)
    pylab.ylim([0.0,1.0])    
    pylab.xticks(x_axis, labels)

# Set titles for the third and fourth rows
pylab.subplot(4, 7, 18)
pylab.title('Incorrectly Classified Samples',fontsize=26)
pylab.subplot(4, 7, 25)
pylab.title('Softmax Predictions for Incorrectly Classified Samples',fontsize=24)

# For 7 steps
for sub_i in range(7):
    # Draw the third row (incorrectly classified digit images)
    pylab.subplot(4, 7, 14 + sub_i + 1)
    pylab.imshow(np.squeeze(incorrectly_predicted[sub_i]),cmap='gray')
    pylab.axis('off')
    
    # Draw the fourth row (incorrect predictions bar chart)
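    # (The fourth-row drawing is cut off in the snippet; by symmetry with the
    # second row above it presumably looks like this; the variable name
    # incorrect_predictions is an assumption.)
    pylab.subplot(4, 7, 21 + sub_i + 1)
    pylab.bar(x_axis + padding, incorrect_predictions[sub_i], width)
    pylab.ylim([0.0, 1.0])
    pylab.xticks(x_axis, labels)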
Example #57
0
def compute_correlations_from_within_ts_tests():
    args = {
        'timbre_spaces': list(sorted(load.database().keys())),
        'audio_representations': [
            'auditory_spectrum', 'fourier_spectrum', 'auditory_strf',
            'fourier_strf', 'auditory_spectrogram', 'fourier_spectrogram',
            'auditory_mps', 'fourier_mps'
        ],
        'res_foldername': './results_07-24-2018',
        'log_foldername': './outs/ts_crossval_fixed',
    }
    corrs = []
    for rs in args['audio_representations']:
        print(rs)
        corrs_rs = []
        for tsp_i, tsp in enumerate(args['timbre_spaces']):
            print(' ',tsp)
            input_data = np.loadtxt(args['res_foldername']+'/{}_{}_input_data.txt'.format(tsp, rs))
            target_data = np.loadtxt(args['res_foldername']+'/{}_{}_target_data.txt'.format(tsp, rs))
            sigmas_ref = np.loadtxt(args['res_foldername']+'/{}_{}_sigmas.txt'.format(tsp, rs))
            corrs_rs_tsp = []
            all_sigmas = []
            for tsp_j, tsp_2 in enumerate(args['timbre_spaces']):
                if (tsp_i != tsp_j):
                    sigmas = np.loadtxt(args['res_foldername']+'/{}_{}_sigmas.txt'.format(tsp_2, rs))
                    all_sigmas.append(sigmas)
            sigmas = np.mean(all_sigmas, axis=0)

            for tsp_j, tsp_2 in enumerate(args['timbre_spaces']):
                if (tsp_i != tsp_j):
                    ndims, ninstrus = input_data.shape[0], input_data.shape[1]
                    no_samples = ninstrus * (ninstrus - 1) / 2
                    idx_triu = np.triu_indices(target_data.shape[0], k=1)
                    target_v = target_data[idx_triu]
                    mean_target = np.mean(target_v)
                    std_target = np.std(target_v)
                    kernel = np.zeros((ninstrus, ninstrus))
                    for i in range(ninstrus):
                        for j in range(i + 1, ninstrus):
                            kernel[i, j] = -np.sum(
                                np.power(
                                    np.divide(input_data[:, i] - input_data[:, j],
                                              (sigmas + np.finfo(float).eps)), 2))
                    kernel_v = kernel[idx_triu]
                    mean_kernel = np.mean(kernel_v)
                    std_kernel = np.std(kernel_v)
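                    # Pearson correlation (population std, ddof=0) between
                    # kernel values and target dissimilarities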
                    Jn = np.sum(np.multiply(kernel_v - mean_kernel, target_v - mean_target))
                    Jd = no_samples * std_target * std_kernel
                    corrs_rs_tsp.append(Jn/Jd)

            # (A previous variant of the loop above also logged per-pair
            # correlations and tracked np.corrcoef(sigmas_ref, sigmas).)
            corrs_rs.append(np.mean(corrs_rs_tsp))
        corrs.append(corrs_rs)
    corrs_m = [np.mean(corrs_rs) for corrs_rs in corrs]
    idx = sorted(range(len(args['audio_representations'])), key=lambda k: args['audio_representations'][k])
    sorted_corrs = [corrs_m[k] for k in idx]
    sorted_labels = [args['audio_representations'][k] for k in idx]
    x = np.arange(len(corrs_m))
    plt.figure(figsize=(12,8))
    plt.plot(sorted_corrs, '-ok')
    plt.xticks(x, sorted_labels)
    plt.ylabel('correlation')
    # plt.savefig(
    #     'correlation_cross_ts.pdf',
    #     bbox_inches='tight')
    plt.show()
Example #58
0
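# Assumed context for this fragment: `roiMat` is a nested list of ROI counts
# and `subjects` a list of subject IDs, built earlier; `numpy as np`,
# `matplotlib`, and `matplotlib.pyplot as plt` are imported at module level.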
roiMat = np.array(roiMat)
roiMat = roiMat.astype(float)  # np.float was removed in NumPy 1.24; use the builtin

font = {'family': 'sans-serif', 'weight': 'normal', 'size': 10}  # 'normal' is not a valid font family
matplotlib.rc('font', **font)

plt.matshow(roiMat, aspect='auto')
#plt.colorbar()
plt.title('ROI Count for Each Model (Nonlinear)')
plt.xlabel('ROI')
plt.ylabel('Model')

plt.xticks(range(0, 30), [str(i) for i in range(1, 31)])
# Rows are grouped by task: EMOTION, GAMBLING, LANGUAGE, MOTOR, RELATIONAL, SOCIAL, WM
plt.yticks(range(len(subjects) // 2,  # integer division: range() requires ints in Python 3
                 len(subjects) * 7, len(subjects)),
           ["E", "G", "L", "M", "R", "S", "W"],
           rotation=90)

for i in range(1, 7):
    plt.axhline((len(subjects) * i) - 0.5, color='k', linewidth=2)
Example #59
0
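# Assumed context for this fragment: `x` holds the time axis and `y_lr` /
# `y_true_1month` the predicted and true prices from an earlier modelling
# step; `matplotlib.pyplot as plt` is imported at module level.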
# Linear Regression
plt.title('Linear Regression')
plt.plot(x,
         y_lr,
         color='crimson',
         linewidth=1,
         linestyle='-',
         label='Predicted Stock Price')
plt.plot(x,
         y_true_1month,
         color='navy',
         linewidth=1,
         linestyle='-',
         label='True Stock Price')
plt.xticks(x[::10], rotation=45)  # label every 10th point
plt.xlabel('Time')
plt.ylabel('Stock Price ($)')
plt.legend()
plt.savefig('../Figure/Linear_Regression_Month.png', bbox_inches='tight')
plt.show()

# Support Vector Machine
plt.figure()  # start a fresh figure so this plot does not draw over the previous one
plt.title('Support Vector Machine')
plt.plot(x,
         y_lr,  # NOTE: this reuses the linear-regression predictions; the SVM predictions were presumably intended here
         color='crimson',
         linewidth=1,
         linestyle='-',
         label='Predicted Stock Price')
Example #60
0
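    # Assumed context for this fragment: a method on a Basemap-style map
    # projection class, where calling `self(lons, lats)` projects
    # coordinates, `self.wrap` is a helper defined elsewhere, and `p` is
    # matplotlib.pylab.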
    def drawmeridians(self, locs, **kwargs):
        x, y = self(locs, np.zeros_like(locs))
        x = self.wrap(x, -2, 2)
        p.xticks(x, visible=False)
        p.grid(True)