Example #1
def matrixMontage(spcomps, *args, **kwargs):
    numcomps, width, height = spcomps.shape
    rowcols = int(np.ceil(np.sqrt(numcomps)))
    for k, comp in enumerate(spcomps):
        plt.subplot(rowcols, rowcols, k + 1)
        plt.imshow(comp, *args, **kwargs)
        plt.axis('off')
Example #2
    def plot_fit(self, size=None, tol=0.1, axis_on=True):

        n, d = self.D.shape

        if size:
            nrows, ncols = size
        else:
            sq = np.ceil(np.sqrt(n))
            nrows = int(sq)
            ncols = int(sq)

        ymin = np.nanmin(self.D)
        ymax = np.nanmax(self.D)
        print('ymin: {0}, ymax: {1}'.format(ymin, ymax))

        numplots = np.min([n, nrows * ncols])
        plt.figure()

        for n in range(numplots):
            plt.subplot(nrows, ncols, n + 1)
            plt.ylim((ymin - tol, ymax + tol))
            plt.plot(self.L[n, :] + self.S[n, :], 'r')
            plt.plot(self.L[n, :], 'b')
            if not axis_on:
                plt.axis('off')
Example #3
def plot_zipf(*freq):
    '''
    basic plotting using matplotlib and pylab
    '''
    ranks, frequencies = [], []
    langs = ["English", "German", "Finnish"]
    colors = ['#FF0000', '#00FF00', '#0000FF']
    if bonus_part:  # bonus_part is a module-level flag in the original project
        colors.extend(['#00FFFF', '#FF00FF', '#FFFF00'])
        langs.extend(["English (Stemmed)", "German (Stemmed)", "Finnish (Stemmed)"])

    plt.subplot(111)  # 1, 1, 1

    num = 6 if bonus_part else 3
    for i in range(num):
        ranks.append(range(1, len(freq[i]) + 1))
        frequencies.append([e[1] for e in freq[i]])

        # log x and y axes, both with base 10 (matplotlib >= 3.3 takes base=;
        # older versions used basex=)
        plt.loglog(ranks[i], frequencies[i], marker='', base=10, color=colors[i], label=langs[i])

    plt.legend()
    plt.grid(True)
    plt.title("Zipf's law!")

    plt.xlabel('Rank')
    plt.ylabel('Frequency')

    plt.show()
Example #4
    def plot_fit(self, size=None, tol=0.1, axis_on=True):

        n, d = self.D.shape

        if size:
            nrows, ncols = size
        else:
            sq = np.ceil(np.sqrt(n))
            nrows = int(sq)
            ncols = int(sq)

        ymin = np.nanmin(self.D)
        ymax = np.nanmax(self.D)
        print('ymin: {0}, ymax: {1}'.format(ymin, ymax))

        numplots = np.min([n, nrows * ncols])
        plt.figure()

        for n in range(numplots):
            plt.subplot(nrows, ncols, n + 1)
            plt.ylim((ymin - tol, ymax + tol))
            plt.plot(self.L[n, :] + self.S[n, :], 'r')
            plt.plot(self.L[n, :], 'b')
            if not axis_on:
                plt.axis('off')
Example #5
def matrixMontage(spcomps, *args, **kwargs):
    numcomps, width, height = spcomps.shape
    rowcols = int(np.ceil(np.sqrt(numcomps)))
    for k, comp in enumerate(spcomps):
        plt.subplot(rowcols, rowcols, k + 1)
        plt.imshow(comp, *args, **kwargs)
        plt.axis('off')
Example #6
    def subplotSingle2x(self,
                        figNum,
                        plotNum,
                        numRows,
                        numCols,
                        time,
                        data,
                        title='',
                        units='',
                        options=''):

        print("subplotSingle2x")

        plt.figure(figNum)
        if title:
            self.title = title
        if units:
            self.units = units
        if self.preTitle:
            fig = plt.gcf()
            # "%s" with a two-element tuple raises TypeError; format both values
            fig.canvas.set_window_title("%s %s" % (figNum, self.preTitle))
        if figNum not in self.sharex:
            self.sharex[figNum] = plt.subplot(numRows, numCols, plotNum)
            plt.plot(time, data, options)

        plt.subplot(numRows, numCols, plotNum, sharex=self.sharex[figNum])
        #         plt.hold(True);
        plt.grid(True)
        plt.title("%s" % (self.title))
        plt.plot(time, data, options)
        plt.ylabel('(%s)' % (self.units))
        plt.margins(0.04)
Example #7
    def plot3setYspan(self, figNum, yspan=None):
        plt.figure(figNum)

        if yspan is None:
            return

        if figNum not in self.sharex:
            self.sharex[figNum] = plt.subplot(3, 1, 1)

        # Re-center each of the three stacked subplots on its current
        # midpoint and force a common y-span.
        for row in (1, 2, 3):
            subplt = plt.subplot(3, 1, row, sharex=self.sharex[figNum])
            yl = subplt.get_ylim()
            med = (yl[1] - yl[0]) * 0.5 + yl[0]
            subplt.set_ylim([med - yspan * 0.5, med + yspan * 0.5])
Example #8
def link_level_bars(levels, usages, quantiles, scheme, direction, color, nnames, lnames, admat=None):
    """
    Bar plots of nodes' link usage of links at different levels.
    """
    if admat is None:  # truth-testing a numpy array raises ValueError
        admat = np.genfromtxt('./settings/eadmat.txt')
    if color == 'solar':
        cmap = Oranges_cmap
    elif color == 'wind':
        cmap = Blues_cmap
    elif color == 'backup':
        cmap = 'Greys'
    nodes, links = usages.shape
    usageLevels = np.zeros((nodes, levels))
    usageLevelsNorm = np.zeros((nodes, levels))
    for node in range(nodes):
        nl = neighbor_levels(node, levels, admat)
        for lvl in range(levels):
            ll = link_level(nl, lvl, nnames, lnames)
            ll = np.array(ll, dtype='int')
            usageSum = sum(usages[node, ll])
            linkSum = sum(quantiles[ll])
            usageLevels[node, lvl] = usageSum / linkSum
            if lvl == 0:
                usageLevelsNorm[node, lvl] = usageSum
            else:
                usageLevelsNorm[node, lvl] = usageSum / usageLevelsNorm[node, 0]
    usageLevelsNorm[:, 0] = 1

    # plot all nodes
    usages = usageLevels.transpose()
    plt.figure(figsize=(11, 3))
    ax = plt.subplot()
    plt.pcolormesh(usages[:, loadOrder], cmap=cmap)
    plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
    ax.set_yticks(np.linspace(.5, levels - .5, levels))
    ax.set_yticklabels(range(1, levels + 1))
    ax.yaxis.set_tick_params(width=0)
    ax.xaxis.set_tick_params(width=0)
    ax.set_xticks(np.linspace(1, nodes, nodes))
    ax.set_xticklabels(loadNames, rotation=60, ha="right", va="top", fontsize=10)
    plt.ylabel('Link level')
    plt.savefig(figPath + '/levels/' + str(scheme) + '/' + 'total' + '_' + str(direction) + '_' + color + '.pdf', bbox_inches='tight')
    plt.close()

    # plot all nodes normalised to usage of first level
    usages = usageLevelsNorm.transpose()
    plt.figure(figsize=(11, 3))
    ax = plt.subplot()
    plt.pcolormesh(usages[:, loadOrder], cmap=cmap)
    plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
    ax.set_yticks(np.linspace(.5, levels - .5, levels))
    ax.set_yticklabels(range(1, levels + 1))
    ax.yaxis.set_tick_params(width=0)
    ax.xaxis.set_tick_params(width=0)
    ax.set_xticks(np.linspace(1, nodes, nodes))
    ax.set_xticklabels(loadNames, rotation=60, ha="right", va="top", fontsize=10)
    plt.ylabel('Link level')
    plt.savefig(figPath + '/levels/' + str(scheme) + '/' + 'total_norm_cont_' + str(direction) + '_' + color + '.pdf', bbox_inches='tight')
    plt.close()
Example #9
    def plot2piFft(self, func, Fs, L):
        ''' Fs is the sampling freq. 
            L is length of signal list.
            This plot is for a func that has period of 2pi.

            If you found the time domain wave is not very accurate,
            that is because you set too small Fs, which leads to
            to big step Ts.
        '''
        base_freq = 1.0/(2*np.pi) # divide the frequency axis by the base frequency, i.e., measure it in units of the base frequency; here the base frequency is 2*pi rad/s
        Ts = 1.0/Fs
        t = [el*Ts for el in range(0,L)]
        x = [func(el) for el in t]

        # https://www.ritchievink.com/blog/2017/04/23/understanding-the-fourier-transform-by-example/

        # code from Xiao Ming:
        # sampleF = Fs
        # print('Xiao Ming:')
        # for f, Y in zip(
        #                 np.arange(0, len(x)*sampleF,1) * 1/len(x) * sampleF, 
        #                 np.log10(np.abs(np.fft.fft(x) / len(x))) 
        #              ):
            # print('\t', f, Y)


        L_4pi = int(4*np.pi / Ts) + 1 # plot the first two periods
        
        self.fig_plot2piFft = plt.figure(7)
        plt.subplot(211)
        plt.plot(t[:L_4pi], x[:L_4pi])
        #title('Signal in Time Domain')
        #xlabel('Time / s')
        #ylabel('x(t)')
        plt.title('Winding Function')
        plt.xlabel('Angular location along air gap [mech. rad.]')
        plt.ylabel('Current Linkage by unit current [Ampere]')

        NFFT = 2**nextpow2(L)
        print('NFFT =', NFFT, '= 2^%g' % (nextpow2(L)), '>= L =', L)
        y = fft(x, NFFT) # y is a COMPLEX array defined in numpy
        Y = [2 * abs(el) / L for el in y] # /L to scale the spectrum amplitude to the actual signal; 2x for single-sided; abs() for amplitude
        f = Fs/2.0/base_freq*linspace(0,1,int(NFFT/2+1)) # unit is base_freq Hz
        #f = Fs/2.0*linspace(0,1,NFFT/2+1) # unit is Hz

        plt.subplot(212)
        plt.plot(f, Y[0:int(NFFT/2+1)])
        plt.title('Single-Sided Amplitude Spectrum of x(t)')
        plt.xlabel('Frequency divided by base_freq [base freq * Hz]')
        #plt.ylabel('|Y(f)|')
        plt.ylabel('Amplitude [1]')
        plt.xlim([0,50])
Example #10
def plot_scenarios(scenarios):
    nrows = len(scenarios)
    fig = plt.figure(figsize=(24, nrows))
    n_plot = nrows
    plt.axis('off')
    # plot fake samples
    for iplot in range(nrows):
        for jplot in range(24):
            ax = plt.subplot(n_plot, 24, iplot * 24 + jplot + 1)
            if iplot == 0:
                ax.annotate(f'{jplot:02d}:00',
                            xy=(0.5, 1),
                            xytext=(0, 5),
                            xycoords='axes fraction',
                            textcoords='offset points',
                            size='large',
                            ha='center',
                            va='baseline')
            im = plt.imshow(scenarios[iplot, jplot - 1, :, :],
                            cmap=plt.cm.gist_earth_r,
                            norm=LogNorm(vmin=0.01, vmax=50))
            plt.axis('off')
    fig.subplots_adjust(right=0.93)
    cbar_ax = fig.add_axes([0.93, 0.15, 0.007, 0.7])
    cbar = fig.colorbar(im, cax=cbar_ax)
    cbar.set_label('fraction of daily precipitation', fontsize=16)
    cbar.ax.tick_params(labelsize=16)

    return fig
Example #11
 def _init_plot_axis_2(self, gs):
     ax2 = plt.subplot(gs[1], sharex=self.ax1)
     ax2.axhline(0,ls='-.', color='red')
     ax2.grid('on')
     #ax2.spines['top'].set_visible(False)
     ax2.xaxis.tick_bottom()
     ax2.set_ylim(self.ylim2)
     ajust_xaxis_tick_labels(ax2)
     ax2.set_yticks(np.arange(-0.06,0.061,0.03))
     self.ax2 = ax2
Example #12
def _plot_base(dep, val, deplim_small, xlim_small, xlabel):
    plt.subplot(1, 2, 1)
    plt.plot(val, dep)
    plt.gca().invert_yaxis()
    plt.grid('on')
    plt.ylabel('depth/km')
    plt.xlabel(xlabel)
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=-45)

    plt.subplot(1, 2, 2)
    plt.plot(val, dep)
    plt.gca().invert_yaxis()
    plt.grid('on')
    plt.ylim(deplim_small)
    plt.xlim(xlim_small)
    plt.xlabel(xlabel)
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=-45)
Example #13
def _plot_base(dep, val, deplim_small, xlim_small, xlabel):
    plt.subplot(1, 2, 1)
    plt.plot(val, dep)
    plt.gca().invert_yaxis()
    plt.grid('on')
    plt.ylabel('depth/km')
    plt.xlabel(xlabel)
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=-45)

    plt.subplot(1, 2, 2)
    plt.plot(val, dep)
    plt.gca().invert_yaxis()
    plt.grid('on')
    plt.ylim(deplim_small)
    plt.xlim(xlim_small)
    plt.xlabel(xlabel)
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=-45)
Example #14
 def _init_plot_axis_2(self, gs):
     ax2 = plt.subplot(gs[1], sharex=self.ax1)
     ax2.axhline(0, ls='-.', color='red')
     ax2.grid('on')
     #ax2.spines['top'].set_visible(False)
     ax2.xaxis.tick_bottom()
     ax2.set_ylim(self.ylim2)
     ajust_xaxis_tick_labels(ax2)
     ax2.set_yticks(np.arange(-0.06, 0.061, 0.03))
     self.ax2 = ax2
Example #15
def main():

    r = compute()
    fig = plt.figure(figsize=(8, 6))  # plt.Figure() creates a detached figure that plt.subplot ignores
    ax = plt.subplot(1, 1, 1, aspect=1 / 21)
    im = ax.imshow(r, origin="upper", extent=[0, 1, 1, 0], cmap=plt.cm.gray_r)
    plt.colorbar(im)

    ax.set_ylabel("Proportion of beach seen")
    ax.set_xlabel("Position")

    plt.savefig("{}/a_priori_x_vs_r_{}.pdf".format(folder, mode))
    plt.show()
Example #16
def load_mnist(path, filename='mnist.pkl.gz', plot=True):
    """
    Loads the MNIST dataset. Downloads the data if it doesn't already exist.
    This code is adapted from the deeplearning.net tutorial on classifying
    MNIST data with Logistic Regression: http://deeplearning.net/tutorial/logreg.html#logreg
    :param path: (str) Path to where data lives or should be downloaded too
    :param filename: (str) name of mnist file to download or load
    :return: train_set, valid_set, test_set
    """
    dataset = '{}/{}'.format(path, filename)
    data_dir, data_file = os.path.split(dataset)

    if data_dir == "" and not os.path.isfile(dataset):
        new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset)
        if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
            dataset = new_path

    if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
        from urllib.request import urlretrieve

        origin = ('http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz')
        print('Downloading data from {}'.format(origin))
        urlretrieve(origin, dataset)

    print('... loading data')
    f = gzip.open(dataset, 'rb')
    # encoding='latin1' is required to unpickle the Python 2 MNIST pickle
    train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    f.close()

    X_train = train_set[0]
    y_train = train_set[1]
    if plot:
        for k in range(25):
            plt.subplot(5, 5, k + 1)  # subplot indices start at 1, not 0
            plt.imshow(np.reshape(X_train[k, :], (28, 28)))
            plt.axis('off')
            plt.title(y_train[k])

    return train_set, valid_set, test_set
Example #17
def plot_L_curve(
    files,
    nlin_pars=['log10_He_', 'log10_visM_', 'rake'],
    nlin_pars_ylabels=[r'$log_{10}(He)$', r'$log_{10}(visM)$', 'rake'],
):
    nreses = collect_from_result_files(files, 'residual_norm_weighted')
    nroughs = collect_from_result_files(files, 'roughening_norm')
    num_subplots = 1 + len(nlin_pars)

    x1 = amin(nreses)
    x2 = amax(nreses)
    dx = x2 - x1
    xlim = (x1 - dx * 0.02, x2 + dx * 0.2)
    xticks = range(int(x1), int(x2), 5)

    plt.subplot(num_subplots, 1, 1)
    plt.loglog(nreses, nroughs, 'o-')
    plt.xlim(xlim)
    plt.gca().set_xticks(xticks)
    plt.gca().get_xaxis().set_major_formatter(
        matplotlib.ticker.ScalarFormatter())
    plt.ylabel('roughening')
    plt.xlabel('Residual Norm')
    plt.grid('on')

    nth = 2
    for par, par_label in zip(nlin_pars, nlin_pars_ylabels):
        y = collect_from_result_files(files, par)
        plt.subplot(num_subplots, 1, nth)
        plt.semilogx(nreses, y, 'o-')
        plt.xlim(xlim)
        plt.gca().set_xticks(xticks)
        plt.gca().get_xaxis().set_major_formatter(
            matplotlib.ticker.ScalarFormatter())
        plt.ylabel(par_label)
        plt.xlabel('Residual Norm')
        plt.grid('on')
        nth += 1
Example #18
def plot_L_curve(files,
                 nlin_pars=['log10_He_', 'log10_visM_', 'rake'],
                 nlin_pars_ylabels=[r'$log_{10}(He)$',
                                    r'$log_{10}(visM)$',
                                    'rake'],
                 ):
    nreses = collect_from_result_files(files, 'residual_norm_weighted')
    nroughs = collect_from_result_files(files, 'roughening_norm')
    num_subplots = 1 + len(nlin_pars)

    x1 = amin(nreses)
    x2 = amax(nreses)
    dx = x2 - x1
    xlim = (x1 - dx * 0.02, x2 + dx * 0.2)
    xticks = range(int(x1), int(x2), 5)

    plt.subplot(num_subplots, 1, 1)
    plt.loglog(nreses, nroughs, 'o-')
    plt.xlim(xlim)
    plt.gca().set_xticks(xticks)
    plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    plt.ylabel('roughening')
    plt.xlabel('Residual Norm')
    plt.grid('on')

    nth = 2
    for par, par_label in zip(nlin_pars, nlin_pars_ylabels):
        y = collect_from_result_files(files, par)
        plt.subplot(num_subplots, 1, nth)
        plt.semilogx(nreses, y, 'o-')
        plt.xlim(xlim)
        plt.gca().set_xticks(xticks)
        plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        plt.ylabel(par_label)
        plt.xlabel('Residual Norm')
        plt.grid('on')
        nth += 1
Example #19
def main():
    r = compute()
    fig = plt.figure(figsize=(8, 6))  # plt.Figure() creates a detached figure that plt.subplot ignores
    ax = plt.subplot(1, 1, 1, aspect=1 / 21)
    im = ax.imshow(r, origin="lower", extent=[0, 1, 0, 1], cmap=plt.cm.gray_r)
    plt.colorbar(im)
    plt.title(
        cond.replace("_", " ").capitalize() +
        ", p={}, mode='{}'".format(fov, mode.replace("_", " ")))

    ax.set_xlabel("x2")
    ax.set_ylabel("x1")
    plt.savefig("../data/figures/a_priori_x_vs_x_{}_{}_{}.pdf".format(
        cond, fov * 100, mode))
    plt.show()
Example #20
    def _init_plot_axis_1(self, gs):
        ax1 = plt.subplot(gs[0])

        ax1.grid('on')
        ax1.set_ylabel('meter')
        ax1.set_ylim(self.ylim1)

        #ax1.set_title('(a)')
        #ax1.spines['bottom'].set_visible(False)
        ax1.xaxis.tick_top()
        ax1.axhline(0, ls=':', color='red')  # '..' is not a valid linestyle; ':' (dotted) assumed

        plt.setp(ax1.get_xticklabels(), visible=False)
        ax1.axhline(0, ls='-.', color='red')
        self.ax1 = ax1
Example #21
    def _init_plot_axis_1(self, gs):
        ax1 = plt.subplot(gs[0])

        ax1.grid('on')
        ax1.set_ylabel('meter')
        ax1.set_ylim(self.ylim1)

        #ax1.set_title('(a)')
        #ax1.spines['bottom'].set_visible(False)
        ax1.xaxis.tick_top()
        ax1.axhline(0, ls=':', color='red')  # '..' is not a valid linestyle; ':' (dotted) assumed

        plt.setp(ax1.get_xticklabels(), visible=False)
        ax1.axhline(0,ls='-.', color='red')
        self.ax1 = ax1
Example #22
File: hhsimple.py Project: lungd/ODYNN-1
    def plot_results(self,
                     ts,
                     i_inj_values,
                     results,
                     ca_true=None,
                     suffix="",
                     show=True,
                     save=False):

        V = results[:, 0]
        a = results[:, 1]
        b = results[:, 2]

        il = self._i_L(V)
        ik = self._i_K(a, b, V)

        plt.figure()

        plt.subplot(4, 1, 1)
        plt.plot(ts, V, 'k')
        plt.title('Leaky Integrator Neuron')
        plt.ylabel('V (mV)')

        plt.subplot(4, 1, 2)
        plt.plot(ts, il, 'g', label='$I_{L}$')
        plt.plot(ts, ik, 'c', label='$I_{K}$')
        plt.ylabel('Current')
        plt.legend()

        plt.subplot(4, 1, 3)
        plt.plot(ts, a, 'c', label='a')
        plt.plot(ts, b, 'b', label='b')
        plt.ylabel('Gating Value')
        plt.legend()

        plt.subplot(4, 1, 4)
        plt.plot(ts, i_inj_values, 'b')
        plt.xlabel('t (ms)')
        plt.ylabel('$I_{inj}$ ($\\mu{A}/cm^2$)')
        # plt.ylim(-1, 40)

        utils.save_show(show, save, name='Results_{}'.format(suffix), dpi=300)
Example #23
def draw_spectrum(data_list):
    T = 3600

    amp_spec, power_spec, freq = spectrum(data_list, T)

    print(f'Max amp in spectrum: {np.max(amp_spec)}')
    plt.figure(figsize=(18, 5))

    plt.subplot(131)
    x = list(range(len(data_list)))
    y = data_list
    plt.title("Observation wind data of Kyoto")
    plt.xlabel('Hours')
    plt.ylabel('Observation wind data of Kyoto')
    plt.plot(x, y, color='green')

    data_len = len(x)

    plt.subplot(132)
    plt.title("Power Spectrum of Wind ")
    x = freq[int(data_len / 2):]
    y = power_spec[int(data_len / 2):]

    # zero out the 0 Hz (DC) component
    y[0] = 0

    plt.xlabel('Frequency (Hz)')
    plt.ylabel('Intensity')
    plt.plot(x, y, color='orange')
    ax = plt.gca()

    x = x[1:]
    y = y[1:]

    ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
    coeffs = np.polyfit(np.log(x), np.log(y), 1)
    beta = -coeffs[0]
    dimension = 1 + (3 - beta) / 2
    print(beta)
    print("The fractal dimension is", dimension)

    plt.subplot(133)
    plt.title("the Curve of log(power-spectrum) and log(frequency)")
    plt.scatter(np.log(x), np.log(y), marker='o', s=10, c=list(range(len(x))))
    # plt.plot(np.log(x), np.log(y), 'o', mfc='none')
    plt.plot(np.log(x), np.polyval(coeffs, np.log(x)))
    plt.xlabel('log freq')
    plt.ylabel('log intensity')
    plt.savefig("../pics/kyoto_wind.png")

    plt.show()
Example #24
def plotter(mode, Bc, Tc, Q):
    col = ['#000080', '#0000FF', '#4169E1', '#6495ED', '#00BFFF', '#B0E0E6']
    plt.figure()
    ax = plt.subplot(111)
    for p in range(Bc.shape[1]):
        plt.plot(Tc[:, p], Bc[:, p], '-', color=col[p])
    plt.xlabel('Tc [TW]')
    plt.ylabel('Bc normalised to total EU load')
    plt.title(str(mode) + ' flow')

    # Shrink current axis by 25% to make room for legend
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])

    plt.legend([str(Q[i] * 100) for i in range(len(Q))],
               loc='center left', bbox_to_anchor=(1, 0.5), title='Quantiles')

    plt.savefig('figures/bctc_' + str(mode) + '.eps')
Example #25
def plot(results, path=os.path.expanduser("~/Desktop/results.pdf")):

    x = results[:, 0]
    y = results[:, 1]

    fig = plt.figure(figsize=(10, 6))
    ax = plt.subplot()

    ax.scatter(x, y, color='black', alpha=0.25)
    ax.set_xlabel("$r$")
    ax.set_ylabel("Diff of profits between $h_0$ and $h_1$")
    ax.set_title("Does strategic depth impact profits?")

    add_fitting_curve(ax, x, y)

    plt.tight_layout()

    plt.savefig(path)
    plt.show()
Example #26
    def plot_proportions(self):

        # Container for proportions of agents having this or that in hand according to their type
        # - rows: type of agent
        # - columns: type of good

        fig = plt.figure(figsize=(25, 12))
        fig.patch.set_facecolor('white')

        n_lines = self.n_goods
        n_columns = 1

        x = np.arange(len(self.proportions))

        for agent_type in range(self.n_goods):

            # First subplot
            ax = plt.subplot(n_lines, n_columns, agent_type + 1)
            ax.set_title(
                "Proportion of agents of type {} having good i in hand\n".
                format(agent_type))

            y = []
            for i in range(self.n_goods):
                y.append([])

            for proportions_at_t in self.proportions:
                for good in range(self.n_goods):
                    y[good].append(proportions_at_t[agent_type, good])

            ax.set_ylim([-0.02, 1.02])

            for good in range(self.n_goods):
                ax.plot(x, y[good], label="Good {}".format(good), linewidth=2)

            ax.legend()

        plt.tight_layout()

        plt.savefig(self.proportions_figure_name)  # savefig takes the path positionally; 'filename=' is not a valid keyword
Example #27
def __draw_dig_metric(data, label):
    """
        draw dig metric graph with data and set title with locale.
    """
    dig_metrics_data = []
    dig_metrics_data = data
    r = np.array([y for x, y in dig_metrics_data])
    theta = np.array([x for x, y in dig_metrics_data])
    fig = plt.figure()
    ax = plt.subplot(111, polar=True)

    #define the bin spaces
    r_bins = np.linspace(0., round(max(r)), 12)
    N_theta = 36
    d_theta = 2. * np.pi / (N_theta + 1.)
    theta_bins = np.linspace(-d_theta / 2., 2. * np.pi + d_theta / 2., N_theta)

    H, theta_edges, r_edges = np.histogram2d(theta % (2. * np.pi),
                                             r,
                                             bins=(theta_bins, r_bins))

    #plot data in the middle of the bins
    r_mid = .5 * (r_edges[:-1] + r_edges[1:])
    theta_mid = .5 * (theta_edges[:-1] + theta_edges[1:])
    plt.xlabel(label)

    cax = ax.contourf(theta_mid, r_mid, H.T, 10, cmap=plt.cm.Spectral)
    ax.scatter(theta, r, color='None')
    ax.set_rmax(round(max(r)))
    plt.grid(True)
    canvas = pylab.get_current_fig_manager().canvas
    canvas.draw()

    pil_image = Image.frombytes("RGB", canvas.get_width_height(),
                                canvas.tostring_rgb())
    pylab.close()
    return pil_image
Example #28
    def plotFuncObj(self, func):
        x = np.arange(0, 2*np.pi, 0.5/180*np.pi)
        y = [func(el) for el in x]
        x = [el/np.pi for el in x] # x for plot. unit is pi
        
        self.fig_plotFuncObj = plt.figure()
        ax = plt.subplot(111) # note: configure ticks on the Axes object, not through plot()
        ax.plot(x, y)

        xmajorLocator   = plt.MultipleLocator(0.25) # place major x ticks at multiples of 0.25
        xmajorFormatter = plt.FormatStrFormatter('%.2fπ') # format for the x-axis tick labels
        ax.xaxis.set_major_locator(xmajorLocator)
        ax.xaxis.set_major_formatter(xmajorFormatter)

        ##matplotlib.pyplot.minorticks_on()
        ##xminorLocator   = MultipleLocator(0.25)
        ##xminorFormatter = FormatStrFormatter(u'%.2fπ')
        ##ax.xaxis.set_minor_locator(xminorLocator)  
        ##ax.xaxis.set_minor_formatter(xminorFormatter)

        plt.xlabel('Angular location along the gap [mech. rad.]')
        plt.ylabel('Turns of winding [1]')
        # plt.title('Turn Function or Winding Function')
        plt.grid(True) # or ax.grid(True)
Example #29
        nres = fid['misfit/norm_weighted'][...]
        nreses.append(nres)

        m = fid['m'][...]
        visMs.append(m[-3])
        Hes.append(m[-2])
        rakes.append(m[-1])

        nrough = fid['regularization/roughening/norm'][...]
        nroughs.append(nrough)

xlim = (7, 22)
xlim = None
xticks = range(7, 22)

plt.subplot(411)
plt.semilogx(nreses, visMs, 'o')
plt.xlim(xlim)
plt.gca().set_xticks(xticks)
plt.grid('on')
plt.ylabel('log10(visM/(Pa.s))')

plt.subplot(412)
plt.semilogx(nreses, Hes, 'o')
plt.xlim(xlim)
plt.gca().set_xticks(xticks)
plt.grid('on')
plt.ylabel('He/km')

plt.subplot(413)
plt.semilogx(nreses, rakes, 'o')
Example #30
def autocorrelate(data, unbias=2, normalize=2, plot_test=False):
    """
    Compute the autocorrelation coefficients for time series data.

    Here we use scipy.signal.correlate, but the results are the same as in
    Yang, et al., 2012 for unbias=1:
    "The autocorrelation coefficient refers to the correlation of a time
    series with its own past or future values. iGAIT uses unbiased
    autocorrelation coefficients of acceleration data to scale the regularity
    and symmetry of gait.
    The autocorrelation coefficients are divided by fc(0) in Eq. (6),
    so that the autocorrelation coefficient is equal to 1 when t=0 ::
        NFC(t) = fc(t) / fc(0)
    Here NFC(t) is the normalised autocorrelation coefficient, and fc(t) are
    autocorrelation coefficients."

    Parameters
    ----------
    data : numpy array
        time series data
    unbias : integer or None
        unbiased autocorrelation: divide by range (1) or by weighted range (2)
    normalize : integer or None
        normalize: divide by 1st coefficient (1) or by maximum abs. value (2)
    plot_test : Boolean
        plot?

    Returns
    -------
    coefficients : numpy array
        [normalized, unbiased] autocorrelation coefficients
    N : integer
        number of coefficients

    Examples
    --------
    >>> import numpy as np
    >>> from mhealthx.signals import autocorrelate
    >>> data = np.random.random(100)
    >>> unbias = 2
    >>> normalize = 2
    >>> plot_test = True
    >>> coefficients, N = autocorrelate(data, unbias, normalize, plot_test)

    """
    import numpy as np
    from scipy.signal import correlate

    # Autocorrelation:
    coefficients = correlate(data, data, 'full')
    coefficients = coefficients[coefficients.size // 2:]  # integer division; float indices fail in Python 3
    N = coefficients.size

    # Plot:
    if plot_test:
        from pylab import plt
        t = np.linspace(0, N, N)
        plt.figure()
        plt.subplot(3, 1, 1)
        plt.plot(t, coefficients, 'k-', label='coefficients')
        plt.title('coefficients')

    # Unbiased:
    if unbias:
        if unbias == 1:
            coefficients /= (N - np.arange(N))
        elif unbias == 2:
            coefficient_ratio = coefficients[0]/coefficients[-1]
            coefficients /= np.linspace(coefficient_ratio, 1, N)
        else:
            raise IOError("unbias should be set to 1, 2, or None")
        # Plot:
        if plot_test:
            plt.subplot(3, 1, 2)
            plt.plot(t, coefficients, 'k-', label='coefficients')
            plt.title('unbiased coefficients')

    # Normalize:
    if normalize:
        if normalize == 1:
            coefficients /= np.abs(coefficients[0])
        elif normalize == 2:
            coefficients /= np.max(np.abs(coefficients))
        else:
            raise IOError("normalize should be set to 1, 2, or None")
        # Plot:
        if plot_test:
            plt.subplot(3, 1, 3)
            plt.plot(t, coefficients, 'k-', label='coefficients')
            plt.title('normalized coefficients')

    # Plot:
    if plot_test:
        plt.show()

    return coefficients, N
Example #31
    1/0
    trajs_full = cpa_space.calc_trajectory(pts=pts0, mysign=1, **params_flow_int)

#    v_at_trajs_full = np.zeros_like(trajs_full)
#    for _pts,_v in zip(trajs_full,v_at_trajs_full):
#        cpa_space.calc_v(pat=pat, pts=_pts, out=_v)

    pts_grid = cpa_space.x_dense_grid_img
#    pts_grid = np.asarray([xx,yy]).copy()
    grid_shape = pts_grid[0].shape

    fig = plt.figure()
    plt.subplot(234)
#    plt.imshow(cell_idx.reshape(Ny,Nx))
    plt.imshow(cell_idx.cpu.reshape(grid_shape))

    plt.subplot(231)
    scale = [2 * 30, 1.5 * 4][vol_preserve]

    cpa_space.quiver(pts_grid, v_dense, scale, ds=16 / 2)
    config_plt()

    plt.subplot(232)
Example #32
# Run simulation
print("Start simulation")
solver.set_integration_method(caphe.solvers.runge_kutta4)
solver.set_internal_dt(dt)
solver.solve(t0=t0, t1=t1, dt=dt, environment=env)
print("Done simulating")

# Retrieve results
times, states, outputs, sources = solver.get_states_and_output_and_sources()

#from output_mapping import get_outputs_map
#print get_outputs_map(caphe_node)
# %%
# Plotting
#
plt.subplot(311)
plt.plot(times * 1e9,
         np.abs(sources[:, 0]),
         'g',
         linewidth=1,
         label='Electrical input')
plt.plot(times * 1e9, np.abs(outputs[:, 0]), 'r', linewidth=2, label='Output')
plt.legend()
plt.subplot(312)
plt.plot(times * 1e9,
         np.real(states[:, 0]),
         'k',
         label='N (number of free electrons)')
plt.legend()
plt.subplot(313)
plt.plot(times * 1e9,
Example #33
path = '../../../datasets/SocialMedia/word2vec_mean_gt/val_InstaCities1M.txt'
file = open(path, "r")
num_topics = 400

print "Loading data ..."
print path

for line in file:
    d = line.split(',')
    regression_values = np.zeros(num_topics)
    for t in range(0, num_topics):
        regression_values[t] = d[t + 1]
    break

regression_values = np.log(regression_values)

print "Max: " + str(regression_values.max())
print "Mmin: " + str(regression_values.min())
print "Mean: " + str(np.mean(regression_values))
print "Sum: " + str(sum(regression_values))

fig = plt.figure()
ax1 = plt.subplot(111)
ax1.set_xlim([0, num_topics])
ax1.set_ylim([0, regression_values.max()])

it_axes = np.arange(num_topics)

ax1.plot(it_axes, regression_values, linestyle=':', color='b')

plt.show()
Example #34
File: tu.py Project: QQ1134614268/Python
from pylab import plt, np, xlim, ylim, figure, plot, xticks, yticks, subplot, show

x = np.arange(-5.0, 5.0, 0.02)
y1 = np.sin(x)

plt.figure(1)
plt.subplot(211)
plt.plot(x, y1)

plt.subplot(212)
# set the x-axis range
xlim(-2.5, 2.5)
# set the y-axis range
ylim(-1, 1)
plt.plot(x, y1)

# evenly sampled time at 200ms intervals
t = np.arange(0., 5., 0.2)

# red dashes, blue squares and green triangles
plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^')
plt.show()

'==========================================='
plt.figure(1)  # first figure
plt.subplot(211)  # first subplot in the first figure
plt.plot([1, 2, 3])
plt.subplot(212)  # second subplot in the first figure
plt.plot([4, 5, 6])

plt.figure(2)  # second figure
Example #35
def train(n_epochs, _batch_size, start_epoch=0):
    """
        train with fixed batch_size for given epochs
        make some example plots and save model after each epoch
    """
    global batch_size
    batch_size = _batch_size
    # create a dataqueue with the keras facilities. this allows
    # to prepare the data in parallel to the training
    sample_dataqueue = GeneratorEnqueuer(generate_real_samples(batch_size),
                                         use_multiprocessing=True)
    sample_dataqueue.start(workers=2, max_queue_size=10)
    sample_gen = sample_dataqueue.get()

    # queue for the generator's latent-point inputs
    gan_sample_dataqueue = GeneratorEnqueuer(
        generate_latent_points_as_generator(batch_size),
        use_multiprocessing=True)
    gan_sample_dataqueue.start(workers=2, max_queue_size=10)
    gan_sample_gen = gan_sample_dataqueue.get()

    # targets for loss function
    valid = -np.ones((batch_size, 1))
    fake = np.ones((batch_size, 1))
    dummy = np.zeros((batch_size, 1))  # Dummy gt for gradient penalty

    bat_per_epo = int(n_samples / batch_size)

    # we need to call the discriminator once in order
    # to initialize the input shapes
    [X_real, cond_real] = next(sample_gen)
    latent = np.random.normal(size=(batch_size, latent_dim))
    critic_model.predict([X_real, cond_real, latent])
    for i in trange(n_epochs):
        epoch = 1 + i + start_epoch
        # enumerate batches over the training set
        for j in trange(bat_per_epo):

            for _ in range(n_disc):
                # fetch a batch from the queue
                [X_real, cond_real] = next(sample_gen)
                latent = np.random.normal(size=(batch_size, latent_dim))
                d_loss = critic_model.train_on_batch(
                    [X_real, cond_real, latent], [valid, fake, dummy])
                # we get four losses back here: average, valid, fake, and gradient_penalty;
                # we want the average of valid and fake
                d_loss = np.mean([d_loss[1], d_loss[2]])

            # train generator
            # prepare points in latent space as input for the generator
            [latent, cond] = next(gan_sample_gen)
            # update the generator via the discriminator's error
            g_loss = generator_model.train_on_batch([latent, cond], valid)
            # summarize loss on this batch
            print(f'{epoch}, {j + 1}/{bat_per_epo}, d_loss {d_loss}' + \
                  f' g:{g_loss} ')  # , d_fake:{d_loss_fake} d_real:{d_loss_real}')

            if np.isnan(g_loss) or np.isnan(d_loss):
                raise ValueError('encountered nan in g_loss and/or d_loss')

            hist['d_loss'].append(d_loss)
            hist['g_loss'].append(g_loss)

        # plot generated examples
        plt.figure(figsize=(25, 25))
        n_plot = 30
        X_fake, cond_fake = generate_fake_samples(n_plot)
        for iplot in range(n_plot):
            plt.subplot(n_plot, 25, iplot * 25 + 1)
            plt.imshow(cond_fake[iplot, :, :].squeeze(),
                       cmap=plt.cm.gist_earth_r,
                       norm=LogNorm(vmin=0.01, vmax=1))
            plt.axis('off')
            for jplot in range(1, 24):
                plt.subplot(n_plot, 25, iplot * 25 + jplot + 1)
                plt.imshow(X_fake[iplot, jplot, :, :].squeeze(),
                           vmin=0,
                           vmax=1,
                           cmap=plt.cm.hot_r)
                plt.axis('off')
        plt.colorbar()
        plt.suptitle(f'epoch {epoch:04d}')
        plt.savefig(
            f'{plotdir}/fake_samples_{params}_{epoch:04d}_{j:06d}.{plot_format}'
        )

        # plot loss
        plt.figure()
        plt.plot(hist['d_loss'], label='d_loss')
        plt.plot(hist['g_loss'], label='g_loss')
        plt.xlabel('batch')  # losses are plotted per training batch
        plt.legend()
        plt.savefig(f'{plotdir}/training_loss_{params}.{plot_format}')
        pd.DataFrame(hist).to_csv('hist.csv')
        plt.close('all')

        generator.save(f'{outdir}/gen_{params}_{epoch:04d}.h5')
        critic.save(f'{outdir}/disc_{params}_{epoch:04d}.h5')
Example #36
# to have the starting points in the history is as well
posH = np.r_[my_dict['pts_at_0'],posH]
 
# make sure the ending points are the same for both methods
T = my_dict['nTimeSteps']
if np.any((posH[(T)*N : (T+1)*N,:]-posT) > 1e-6):
  print (posH[(T)*N : (T+1)*N,:]-posT)
  raise ValueError
#pdb.set_trace()

#print posT.shape
#print posT.T

if dim == 1:
  fig = plt.figure()
  plt.subplot(5, 1, 1)
  plt.plot(np.arange(posT.size), my_dict['pts_at_0'][:, 0], '.r')
  plt.subplot(5, 1, 2)
  plt.plot(np.arange(posT.size), posT[:, 0], '.b', label='GPU')
  plt.legend()
  plt.subplot(5, 1, 3)
  #plt.plot(np.arange(posT.size),my_dict['CPU_results'][0,:],'.r',label='CPU')
  plt.plot(np.arange(posT.size), posT[:, 0], '.b')
  plt.legend()
  plt.subplot(5, 1, 4)
  plt.plot(np.arange(v.size), v[:, 0], '.b', label='GPU velocities')
  plt.legend()
  plt.subplot(5, 1, 5)
  for i in range(0, N, 32):
#    print posH[i::N].shape
    plt.plot(np.ones(T+1)*i, posH[i::N], 'r-x')
Example #37
def main(filename, ylim = (0,15), xlim = (-40,20)):
    # construct experiment from header
    import spk2_mp
    conds, stims, mps, cells = spk2_mp.main(filename)
    
    conds = on_off_evnts(conds['name'], conds['times'])
    stims = on_off_evnts(stims['name'], stims['times'])

    prtrct = on_off_evnts('prtrct', mps['prtrct'])
    retrct = on_off_evnts('retrct', mps['retrct'])
    mps = motor_programs(prtrct, retrct)

    expt = experiment()
    expt.add_motor_programs(mps)
    expt.add_cbi2_stims(stims)
    expt.add_condition(conds)
    
    for cell_name in cells.keys():
        tmpcell = ap_cell(cell_name)
        tmpcell.add_spk_times(cells[cell_name])
        expt.add_cell(tmpcell)

    # now getting into plotting and the like, have to split this off somehow.
    # figure out max num prgs in any one condition
    max_prgs = max([cnd._num_prgs for cnd in expt])
    import matplotlib.gridspec as gridspec
    nrow = max_prgs
    ncol = 3
    plt_counter = 0
    gs = gridspec.GridSpec(nrow,ncol)

    # plot b48
    # save the bottom row for the averages (row-1)
    from pylab import plt
    for col_num, cond in enumerate(expt):
        if col_num == 0: left_most = True
        else: left_most = False
        for row_num, prog in enumerate(cond):
            if row_num == (cond._num_prgs - 1): bottom_most = True
            else: bottom_most = False
            ax = plt.subplot(gs[row_num,col_num])
            quick_plot(expt, prog, 'b48', ax,
                       left_most = left_most, bottom_most = bottom_most,
                       ylim = ylim, xlim = xlim)
            if row_num == nrow - 1:
                break
    expt.b48_fig = plt.gcf()
    expt.b48_fig.set_size_inches((7.5,10))
    
    # plot both b48 and b8
    # save the bottom row for the averages (row-1)
    for col_num, cond in enumerate(expt):
        if col_num == 0: left_most = True
        else: left_most = False
        for row_num, prog in enumerate(cond):
            if row_num == (cond._num_prgs - 1): bottom_most = True
            else: bottom_most = False
            ax = plt.subplot(gs[row_num,col_num])
            quick_plot(expt, prog, 'b48', ax,
                       left_most = left_most, bottom_most = bottom_most,
                       ylim = ylim, xlim = xlim)
            quick_plot(expt, prog, 'b8', ax,
                       left_most = left_most, bottom_most = bottom_most,
                       ylim = ylim, xlim = xlim)
            # if row_num == nrow:
            #     break
    expt.b8_b48_fig = plt.gcf()
    expt.b8_b48_fig.set_size_inches((7.5,10))
    return (expt)
Example #38
def bars(scheme, verbose=None, norm='load'):
    """
    Figure to compare link proportional and usage proportional for a single
    scheme and put them in ./sensitivity/figures/scheme/
    """
    # Load data and results
    F = abs(np.load('./results/' + scheme + '-flows.npy'))
    quantiles = np.load('./results/quantiles_' + scheme + '_' + str(lapse) + '.npy')
    nNodes = 30

    names = node_namer(N)  # array of node labels
    links = range(len(F))
    nodes = np.linspace(0.5, 2 * nNodes - 1.5, nNodes)
    nodes_shift = nodes + .5

    for direction in directions:
        N_usages = np.load('./results/Node_contrib_' + scheme + '_' + direction + '_' + str(lapse) + '.npy')

        # Compare node transmission to mean load
        if verbose:
            print('Plotting node comparison - ' + scheme + ' - ' + direction)
        # sort node names for x-axis
        Total_usage = np.sum(N_usages, 1)
        node_ids = np.array(range(len(N))).reshape((len(N), 1))
        node_mean_load = [n.mean for n in N]

        # Vector for normalisation
        if norm == 'cap':
            normVec = np.ones(nNodes) * sum(quantiles)
        else:
            normVec = node_mean_load

        # Calculate node proportional
        EU_load = np.sum(node_mean_load)
        Total_caps = sum(quantiles)
        Node_proportional = node_mean_load / EU_load * Total_caps / normVec
        Node_proportional = np.reshape(Node_proportional, (len(Node_proportional), 1))

        # Calculate link proportional
        link_proportional = linkProportional(N, link_dic, quantiles)
        link_proportional = [link_proportional[i] / normVec[i] for i in range(nNodes)]

        # Calculate old usage proportional
        if direction == 'combined':
            old_usages = np.load('./linkcolouring/old_' + scheme + '_copper_link_mix_import_all_alpha=same.npy')
            old_usages += np.load('./linkcolouring/old_' + scheme + '_copper_link_mix_export_all_alpha=same.npy')
        else:
            old_usages = np.load('./linkcolouring/old_' + scheme + '_copper_link_mix_' + direction + '_all_alpha=same.npy')
        avg_node_usage = np.sum(np.sum(old_usages, axis=2), axis=0) / 70128.
        avg_EU_usage = np.sum(np.sum(np.sum(old_usages, axis=2), axis=0)) / 70128.
        avg_node_usage /= avg_EU_usage
        avg_node_usage /= normVec
        avg_node_usage *= 500000

        # Calculate usage and sort countries by mean load
        normed_usage = Total_usage / normVec
        normed_usage = np.reshape(normed_usage, (len(normed_usage), 1))
        node_mean_load = np.reshape(node_mean_load, (len(node_mean_load), 1))
        data = np.hstack([normed_usage, node_ids, node_mean_load, link_proportional, Node_proportional])
        data_sort = data[data[:, 2].argsort()]
        names_sort = [names[int(i)] for i in data_sort[:, 1]]
        # flip order so largest is first
        names_sort = names_sort[::-1]
        link_proportional = data_sort[:, 3][::-1]
        Node_proportional = data_sort[:, 4][::-1]
        data_sort = data_sort[:, 0][::-1]

        plt.figure(figsize=(10, 4), facecolor='w', edgecolor='k')
        ax = plt.subplot(111)
        green = '#009900'
        blue = '#000099'

        # Plot node proportional
        plt.rc('lines', lw=2)
        plt.rc('lines', dash_capstyle='round')
        plt.plot(np.linspace(0, len(N) * 2 + 2, len(N)), Node_proportional, '--k')
        # Plot link proportional
        #plt.bar(nodes, link_proportional, width=1, color=green, edgecolor='none')
        # Plot old usage proportional
        plt.bar(nodes, avg_node_usage[loadOrder], width=1, color=green, edgecolor='none')
        # Plot usage proportional
        plt.bar(nodes_shift, data_sort, width=1, color=blue, edgecolor='none')

        # Magic with ticks and labels
        ax.set_xticks(np.linspace(2, len(N) * 2 + 2, len(N) + 1))
        ax.set_xticklabels(names_sort, rotation=60, ha="right", va="top", fontsize=10.5)

        ax.xaxis.grid(False)
        ax.xaxis.set_tick_params(width=0)
        if norm == 'cap':
            ax.set_ylabel(r'$M_n/ \mathcal{K}^T$')
        else:
            # ax.set_ylabel(r'Network usage [MW$_T$/MW$_L$]')
            ax.set_ylabel(r'$M_n/\left\langle L_n \right\rangle$')
        maxes = [max(avg_node_usage), max(data_sort)]
        plt.axis([0, nNodes * 2 + .5, 0, 1.15 * max(maxes)])

        # Legend
        artists = [plt.Line2D([0, 0], [0, 0], ls='dashed', lw=2.0, c='k'), plt.Rectangle((0, 0), 0, 0, ec=green, fc=green), plt.Rectangle((0, 0), 0, 0, ec=blue, fc=blue)]
        LABS = ['$M^1$', '$M^{3}_{old}$', '$M^{3}_{new}$']
        leg = plt.legend(artists, LABS, loc='upper left', ncol=len(artists), columnspacing=0.6, borderpad=0.4, borderaxespad=0.0, handletextpad=0.2, handleheight=1.2)
        leg.get_frame().set_alpha(0)
        leg.get_frame().set_edgecolor('white')
        ltext = leg.get_texts()
        plt.setp(ltext, fontsize=12)    # the legend text fontsize

        plt.savefig(figPath + scheme + '/network-usage-' + direction + '-' + norm + '.png', bbox_inches='tight')
        if verbose:
            print('Saved figures to ./figures/compareUsage/' + scheme + '/network-usage-' + direction + '-' + norm + '.png')
Example #39
#Set up colors using palettable instead
sc = palettable.colorbrewer.qualitative.Set2_8.mpl_colors

#W1 line function
col = np.linspace(-4,4,100)
ch2prime = 20.5-col

print "plotting"

#tdata = Table.read('GTR-ADM-QSO-ir-testhighz3fall_kdephotoz_lup_trainingset_qsos_colors.dat', format='ascii') #high-z quasar colors

tdata = pf.open('GTR-ADM-QSO-ir-testhighz_kdephotoz_lup_2014_trainingset_qsos.fits')[1].data

fig=plt.figure(2,figsize=[10.0,10.0])
ax2=plt.subplot(1,1,1)

majorLocator = MultipleLocator(1)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(0.25)




#SpIES DATA
#hex_contour(s5color,s5mag2, levels=[0.99,0.95,0.9,0.7,0.5,0.3,0.1], std=True, min_cnt=50, smoothing=2, hkwargs={'gridsize':100,'extent':(-4,4,13,27)}, skwargs={'color':Sps,'alpha':0.1,'marker':'.'}, ckwargs={'colors':Spc,'alpha':1,'linewidths':2})
#Star DATA
#hex_contour(irstarcolor,ch2starmag, levels=[0.99,0.95,0.9,0.7,0.5,0.3,0.1], std=True, min_cnt=10, smoothing=4, hkwargs={'gridsize':50}, skwargs={'color':sts,'alpha':0.1,'marker':'.'}, ckwargs={'colors':stc,'alpha':1,'linewidths':2})
#Low-Redshift Quasars
#hex_contour(irlzcolor,ch2lzmag, levels=[0.95,0.9,0.7,0.5,0.3,0.1], std=True, min_cnt=10, smoothing=2, hkwargs={'gridsize':25}, skwargs={'color':lzs,'alpha':0.1,'marker':'.'}, ckwargs={'colors':lzc,'alpha':1,'linewidths':2})
Example #40
            mayavi_mlab_figure_bgwhite('src')
            mayavi_mlab_set_parallel_projection(True)
            mayavi_mlab_figure_bgwhite('transformed')
            mayavi_mlab_clf()
            mayavi_mlab_set_parallel_projection(True)
            x0, y0, z0 = pts.cpu.T

            mayavi_mlab_figure('src')
            points3d(x0, y0, z0, scale_factor=5, color=red)
            x1, y1, z1 = pts_transformed.cpu.T
            mayavi_mlab_figure('transformed')
            points3d(x1, y1, z1, scale_factor=5, color=blue)

    if 0:
        plt.close('all')
        for c in range(3):
            plt.figure(c + 1)
            for i in range(min(21, pts_grid[0].shape[2])):
                plt.subplot(3, 7, i + 1)
                plt.imshow(v[:, c].reshape(pts_grid[0].shape)[:, :, i],
                           interpolation='Nearest', vmin=v.min(), vmax=v.max())
                # plt.colorbar()
    print(cpa_space._calcs_gpu.kernel_filename)
    if computer.has_good_gpu_card:
        input("raw_input:")
        velTess[1:-1, 0] = (2 * np.random.rand(Nv - 2) - 1)

        velTess /= nC / 10

    else:
        tw.sample_gaussian_velTess(level=0,
                                   Avees=cpa_space.Avees,
                                   velTess=velTess,
                                   mu=None)

    tw.update_pat_from_velTess(velTess, level=0)
    tw.calc_v(level=0)
    tw.v_dense.gpu2cpu()

    plt.clf()
    plt.subplot(221)
    plt.plot(tw.x_dense.cpu.ravel(), tw.v_dense.cpu.ravel(), 'b-')
    plt.grid('on')
    plt.title(r'$v(x)$')

    plt.plot(x_tess, velTess, 'ro')

    #    pts_fwd_cf =  closed_form_int.calc_phi(0.6-0.0001,velTess=velTess,t=1.0)
    #
    #    print
    #    print 'pts_fwd_cf',pts_fwd_cf
    #

    x = tw.x_dense.cpu.ravel()
    pts_fwd_cf = np.zeros_like(x)
    nPts = len(x)
Example #42
File: plotting.py Project: 0x0all/rep
 def _plot(self):
     for i, plotter in enumerate(self.plots):
         plt.subplot(self.rows, self.columns, i + 1)
         plotter.plot(fontsize=self.fontsize_, show_legend=self.show_legend_)
Example #43
# Generate images based on noise
img = generator([z_gen, label])
# Discriminator determines validity
valid = critic([img, label])
# Defines generator model
generator_model = tf.keras.Model([z_gen, label], valid)
generator_model.compile(loss=wasserstein_loss, optimizer=optimizer)
print('finished building networks')

# plot some real samples
# plot a couple of samples
plt.figure(figsize=(25, 25))
n_plot = 30
[X_real, cond_real] = next(generate_real_samples(n_plot))
for i in range(n_plot):
    plt.subplot(n_plot, 25, i * 25 + 1)
    plt.imshow(cond_real[i, :, :].squeeze(),
               cmap=plt.cm.gist_earth_r,
               norm=LogNorm(vmin=0.01, vmax=1))
    plt.axis('off')
    for j in range(1, 24):
        plt.subplot(n_plot, 25, i * 25 + j + 1)
        plt.imshow(X_real[i, j, :, :].squeeze(),
                   vmin=0,
                   vmax=1,
                   cmap=plt.cm.hot_r)
        plt.axis('off')
plt.colorbar()
plt.savefig(f'{plotdir}/real_samples.{plot_format}')

hist = {'d_loss': [], 'g_loss': []}
Example #44
def autocorrelate(data, unbias=2, normalize=2, plot_test=False):
    """
    Compute the autocorrelation coefficients for time series data.

    Here we use scipy.signal.correlate, but the results are the same as in
    Yang, et al., 2012 for unbias=1:
    "The autocorrelation coefficient refers to the correlation of a time
    series with its own past or future values. iGAIT uses unbiased
    autocorrelation coefficients of acceleration data to scale the regularity
    and symmetry of gait.
    The autocorrelation coefficients are divided by fc(0) in Eq. (6),
    so that the autocorrelation coefficient is equal to 1 when t=0 ::
        NFC(t) = fc(t) / fc(0)
    Here NFC(t) is the normalised autocorrelation coefficient, and fc(t) are
    autocorrelation coefficients."

    Parameters
    ----------
    data : numpy array
        time series data
    unbias : integer or None
        unbiased autocorrelation: divide by range (1) or by weighted range (2)
    normalize : integer or None
        normalize: divide by 1st coefficient (1) or by maximum abs. value (2)
    plot_test : Boolean
        plot?

    Returns
    -------
    coefficients : numpy array
        [normalized, unbiased] autocorrelation coefficients
    N : integer
        number of coefficients

    Examples
    --------
    >>> import numpy as np
    >>> from mhealthx.signals import autocorrelate
    >>> data = np.random.random(100)
    >>> unbias = 2
    >>> normalize = 2
    >>> plot_test = True
    >>> coefficients, N = autocorrelate(data, unbias, normalize, plot_test)

    """
    import numpy as np
    from scipy.signal import correlate

    # Autocorrelation:
    coefficients = correlate(data, data, 'full')
    coefficients = coefficients[coefficients.size // 2:]  # integer division; float indices fail in Python 3
    N = coefficients.size

    # Plot:
    if plot_test:
        from pylab import plt
        t = np.linspace(0, N, N)
        plt.figure()
        plt.subplot(3, 1, 1)
        plt.plot(t, coefficients, 'k-', label='coefficients')
        plt.title('coefficients')

    # Unbiased:
    if unbias:
        if unbias == 1:
            coefficients /= (N - np.arange(N))
        elif unbias == 2:
            coefficient_ratio = coefficients[0] / coefficients[-1]
            coefficients /= np.linspace(coefficient_ratio, 1, N)
        else:
            raise IOError("unbias should be set to 1, 2, or None")
        # Plot:
        if plot_test:
            plt.subplot(3, 1, 2)
            plt.plot(t, coefficients, 'k-', label='coefficients')
            plt.title('unbiased coefficients')

    # Normalize:
    if normalize:
        if normalize == 1:
            coefficients /= np.abs(coefficients[0])
        elif normalize == 2:
            coefficients /= np.max(np.abs(coefficients))
        else:
            raise IOError("normalize should be set to 1, 2, or None")
        # Plot:
        if plot_test:
            plt.subplot(3, 1, 3)
            plt.plot(t, coefficients, 'k-', label='coefficients')
            plt.title('normalized coefficients')

    # Plot:
    if plot_test:
        plt.show()

    return coefficients, N
Example #45
def example(img=None,tess='I',eval_cell_idx=True,eval_v=True,show_downsampled_pts=True,
            valid_outside=True,base=[1,1],
            scale_spatial=.1,
            scale_value=100,
            permute_cell_idx_for_display=True,
            nLevels=3,
            vol_preserve=False,
            zero_v_across_bdry=[0,0],
            use_lims_when_plotting=True):
          
    show_downsampled_pts = bool(show_downsampled_pts)
    eval_cell_idx = bool(eval_cell_idx)
    eval_v = bool(eval_v)  # was bool(eval_cell_idx), a copy-paste bug
    valid_outside = bool(valid_outside)
    permute_cell_idx_for_display = bool(permute_cell_idx_for_display)
    vol_preserve = bool(vol_preserve)
    
    if img is None:
        img =  Img(get_std_test_img())
    else:
        img=Img(img)
        img = img[:,:,::-1] # bgr2rgb
        
        
    
    tw = TransformWrapper(nRows=img.shape[0],
                          nCols=img.shape[1],
                          nLevels=nLevels,  
                          base=base,
                          scale_spatial=scale_spatial, # controls the prior's smoothness
                          scale_value=scale_value, # controls the prior's variance
                          tess=tess,
                          vol_preserve=vol_preserve,
                          zero_v_across_bdry=zero_v_across_bdry,
                          valid_outside=valid_outside)
    print(tw)
         
     
    # You probably want to do this: pad the image border with zeros
    border_width=1
    img[:border_width]=0
    img[-border_width:]=0
    img[:,:border_width]=0
    img[:,-border_width:]=0      
    
    # The tw.calc_T_fwd (or tw.calc_T_inv) is always done in gpu.
    # After using it to compute new pts, 
    # you may want to use remap (to warp an image accordingly). 
    # If you will use tw.remap_fwd (or tw.remap_inv), which is done in gpu,
    # then the image type can be either float32 or float64.
    # But if you plan to use tw.remap_fwd_opencv (or tw.remap_inv_opencv),
    # which is done in cpu (hence slightly slower) but supports better
    # interpolation methods, then the image type must be np.float32.
    
#    img_original = CpuGpuArray(img.copy().astype(np.float32))
    img_original = CpuGpuArray(img.copy().astype(np.float64))
    
    img_wrapped_fwd= CpuGpuArray.zeros_like(img_original)
    img_wrapped_bwd= CpuGpuArray.zeros_like(img_original)
    
     
    seed=0
    np.random.seed(seed)    
               
    ms_Avees=tw.get_zeros_PA_all_levels()
    ms_theta=tw.get_zeros_theta_all_levels() 
    
    for level in range(tw.ms.nLevels):  
        if level==0:
            tw.sample_gaussian(level,ms_Avees[level],ms_theta[level],mu=None)# zero mean
        else:
            tw.sample_from_the_ms_prior_coarse2fine_one_level(ms_Avees,ms_theta,
                                                                level_fine=level)                
    
    
    print('\nimg shape: {}\n'.format(img_original.shape))

    # You don't have to use these. You can use any 2d array
    # that has two columns (regardless of the number of rows).
    pts_src = tw.pts_src_dense
    
    # Create buffers for the output
    pts_fwd = CpuGpuArray.zeros_like(pts_src) 
    pts_inv = CpuGpuArray.zeros_like(pts_src)  

   
    for level in range(tw.ms.nLevels):
        
        
        #######################################################################
        # instead of the tw.sample_from_the_ms_prior() above,
        # you may want to use one of the following.        
        # 1)
        # tw.sample_gaussian(level,ms_Avees[level],ms_theta[level],mu=None)# zero mean
        # 2)
        # tw.sample_gaussian(level,ms_Avees[level],ms_theta[level],mu=some_user_specified_mu)
        # The following should be used only for level>0 :
        # 3)
        # tw.sample_normal_in_one_level_using_the_coarser_as_mean(Avees_coarse=ms_Avees[level-1], 
        #                                                        Avees_fine=ms_Avees[level],
        #                                                        theta_fine=ms_theta[level], 
        #                                                        level_fine=level)
        #
        #######################################################################
        
        
#        You can also change the values this way:
#        cpa_space = tw.ms.L_cpa_space[level]
#        theta = cpa_space.get_zeros_theta()
#        theta[:] = some values
#        Avees = cpa_space.get_zeros_PA()
#        cpa_space.theta2Avees(theta,Avees)
#        cpa_space.update_pat(Avees)
              
        
        # This step is important and must be done
        # before you try to "use" the new values of
        # the (vectorized) A's.
        tw.update_pat_from_Avees(ms_Avees[level],level) 
        
     
        if eval_v:
            # Evaluate the velocity field.
            # You don't have to do this unless you want to visualize v.
            # (When evaluating the transformation, v will be evaluated
            #  internally anyway -- but its result won't be stored.)
            tw.calc_v(level=level) 
        
    

        
        
        # optional, if you want to time it
        timer_gpu_T_fwd = GpuTimer()           
        
        # Simply calling
        #   tic = time.clock()
        # and then
        #   toc = time.clock()
        # won't work:
        # most likely you will get that toc-tic is zero.
        # You need to use the GpuTimer object. When you do that, 
        # one side effect is that suddenly the toc-tic from above will
        # give you a more realistic result.
        
        
        tic = time.clock() 
        timer_gpu_T_fwd.tic()
        tw.calc_T_fwd(pts_src,pts_fwd,level=level)
        timer_gpu_T_fwd.toc()   
        toc = time.clock()

        print('Time, in sec, for computing T_fwd:')
        print(timer_gpu_T_fwd.secs)
        print(toc - tic)  # likely to be 0, unless you also used the GpuTimer.
        
        # You can also time the inv of course. Results will be similar.
        tw.calc_T_inv(pts_src,pts_inv,level=level)  
             
        if eval_cell_idx:   
            # cell_idx is computed here just for display. 
            cell_idx = CpuGpuArray.zeros(len(pts_src),dtype=np.int32)
            tw.calc_cell_idx(pts_src,cell_idx,level,
                             permute_for_disp=permute_cell_idx_for_display)
 

        # You may also want to time the remap.
        # However, the remap is usually very fast (e.g., about 2 milliseconds).
#            timer_gpu_remap_fwd = GpuTimer()  
#            tic = time.clock()
#            timer_gpu_remap_fwd.tic()
        tw.remap_fwd(pts_inv=pts_inv,img=img_original,img_wrapped_fwd=img_wrapped_fwd)
#            timer_gpu_remap_fwd.toc()   
#            toc = time.clock()   

        # If the img type is np.float32, you may also use
        # tw.remap_fwd_opencv instead of tw.remap_fwd. The differences between
        # the two methods are explained above.
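        # A hedged sketch of that OpenCV path (its call signature is taken from
        # 示例#58 below; the float32 copies and the 'interp_method' value are
        # assumptions for illustration only):
        # img32 = CpuGpuArray(img.copy().astype(np.float32))
        # wrapped32 = CpuGpuArray.zeros_like(img32)
        # tw.remap_fwd_opencv(pts_inv, img32, wrapped32, interp_method)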
                   
        
        
        tw.remap_inv(pts_fwd=pts_fwd,img=img_original,img_wrapped_inv=img_wrapped_bwd)
        
    
        # For display purposes, do gpu2cpu transfer
        print ("For display purposes, do gpu2cpu transfer")
        if eval_cell_idx:        
            cell_idx.gpu2cpu()  
            


            
            
            
        if eval_v:
            tw.v_dense.gpu2cpu() 
        pts_fwd.gpu2cpu()
        pts_inv.gpu2cpu()
        img_wrapped_fwd.gpu2cpu()
        img_wrapped_bwd.gpu2cpu()
        
        figsize = (12,12)
        plt.figure(figsize=figsize)

         
        if eval_v: 
            plt.subplot(332)
            tw.imshow_vx() 
            plt.title('vx')
            plt.subplot(333)
            tw.imshow_vy()   
            plt.title('vy') 
        
        if eval_cell_idx:
            plt.subplot(331)
            cell_idx_disp = cell_idx.cpu.reshape(img.shape[0],-1)
            plt.imshow(cell_idx_disp)
            plt.title('tess (type {})'.format(tess))
        
        if show_downsampled_pts:
            ds=20
            pts_src_grid = pts_src.cpu.reshape(tw.nRows,-1,2)
            pts_src_ds=pts_src_grid[::ds,::ds].reshape(-1,2)
            pts_fwd_grid = pts_fwd.cpu.reshape(tw.nRows,-1,2)
            pts_fwd_ds=pts_fwd_grid[::ds,::ds].reshape(-1,2)
            pts_inv_grid = pts_inv.cpu.reshape(tw.nRows,-1,2)
            pts_inv_ds=pts_inv_grid[::ds,::ds].reshape(-1,2)
        
           
            use_lims=use_lims_when_plotting
            plt.subplot(334)    
            plt.plot(pts_src_ds[:,0],pts_src_ds[:,1],'r.')
            plt.title('pts ds')
            tw.config_plt()
            plt.subplot(335)
            plt.plot(pts_fwd_ds[:,0],pts_fwd_ds[:,1],'g.')
            plt.title('fwd(pts)')
            tw.config_plt(axis_on_or_off='on',use_lims=use_lims)
            plt.subplot(336)
            plt.plot(pts_inv_ds[:,0],pts_inv_ds[:,1],'b.')
            plt.title('inv(pts)')
            tw.config_plt(axis_on_or_off='on',use_lims=use_lims)
         
                        
        plt.subplot(337)
        plt.imshow(img_original.cpu.astype(np.uint8))
        plt.title('img')
#        plt.axis('off') 
        plt.subplot(338)
        plt.imshow(img_wrapped_fwd.cpu.astype(np.uint8))
#        plt.axis('off') 
        plt.title('fwd(img)')
        plt.subplot(339)
        plt.imshow(img_wrapped_bwd.cpu.astype(np.uint8))    
#        plt.axis('off') 
        plt.title('inv(img)')
    
    
    return tw
示例#46
0
def calcRMS(log, directory, subdir):
    # NOTE: this hardcoded config path overrides the 'directory' argument.
    with open("/home/superjax/Documents/inertial_sense/config.yaml", 'r') as config_file:
        config = yaml.safe_load(config_file)
    directory = config["directory"]
    serials = config['serials']
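    # A minimal config.yaml sketch (hypothetical values; only these two keys
    # are read here):
    #   directory: /path/to/log/data
    #   serials: [10101, 10102]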

    numDev = len(log.devices)
    debug = True
    np.set_printoptions(linewidth=200)
    averageRMS = []
    compassing = False
    navMode = (log.devices[0].data['ins2']['iStatus'] & 0x1000)[-1]
    if numDev > 1:

        print("\nComputing RMS Accuracies: (%d devices)" % (numDev))

        # Build a 3D array of the data.  idx 0 = device,  idx 1 = t,  idx 2 = [t, lla, uvw, q]
        data = [
            np.hstack((log.devices[i].data['ins2']['tow'][:, None],
                       log.devices[i].data['ins2']['lla'],
                       log.devices[i].data['ins2']['uvw'],
                       log.devices[i].data['ins2']['q']))
            for i in range(numDev)
        ]

        # Make sure that the time stamps are realistic
        for dev in range(numDev):
            if (np.diff(data[dev][:, 0]) > 10.0).any():
                print("large gaps in data for dev", dev,
                      "chopping off data before gap".format(dev))
                idx = np.argmax(np.diff(data[dev][:, 0])) + 1
                data[dev] = data[dev][idx:, :]

        min_time = max([np.min(data[i][:, 0]) for i in range(numDev)])
        max_time = min([np.max(data[i][:, 0]) for i in range(numDev)])

        # If we are in compassing mode, then only calculate RMS after all devices have fix
        if log.devices[0].data['flashCfg']['RTKCfgBits'][-1] == 8:
            compassing = True
            time_of_fix_ms = [
                dev.data['gps1RtkCmpRel']['timeOfWeekMs'][np.argmax(
                    dev.data['gps1RtkCmpRel']['arRatio'] > 3.0)] / 1000.0
                for dev in log.devices
            ]
            # print(time_of_fix_ms)
            min_time = max(time_of_fix_ms)

        # only take the second half of the data
        min_time = max_time - (max_time - min_time) / 2.0

        # Resample at a steady 100 Hz
        dt = 0.01
        t = np.arange(1.0, max_time - min_time - 1.0, dt)
        for i in range(numDev):
            # Chop off extra data at beginning and end
            data[i] = data[i][data[i][:, 0] > min_time]
            data[i] = data[i][data[i][:, 0] < max_time]

            # Chop off the min time so everything is wrt to start
            data[i][:, 0] -= min_time

            # Interpolate data so that it has all the same timestamps
            fi = interp1d(data[i][:, 0],
                          data[i][:, 1:].T,
                          kind='cubic',
                          fill_value='extrapolate',
                          bounds_error=False)
            data[i] = np.hstack((t[:, None], fi(t).T))

            # Normalize Quaternions
            data[i][:, 7:] /= norm(data[i][:, 7:], axis=1)[:, None]

        # Make a big 3D numpy array we can work with [dev, sample, data]
        data = np.array(data)

        # Convert lla to ned using first device lla at center of data as reference
        refLla = data[0, int(round(len(t) / 2.0)), 1:4].copy()
        for i in range(numDev):
            data[i, :, 1:4] = lla2ned(refLla, data[i, :, 1:4])

        # Find Mean Data
        means = np.empty((len(data[0]), 10))
        means[:, :6] = np.mean(
            data[:, :, 1:7],
            axis=0)  # calculate mean position and velocity across devices
        means[:, 6:] = meanOfQuatArray(data[:, :, 7:].transpose(
            (1, 0,
             2)))  # Calculate mean attitude of all devices at each timestep

        # calculate the attitude error for each device
        att_error = np.array([
            qboxminus(data[dev, :, 7:], means[:, 6:]) for dev in range(numDev)
        ])
        # Calculate the Mounting Bias for all devices (assume the mounting bias is the mean of the attitude error)
        mount_bias = np.mean(att_error, axis=1)
        if compassing:
            # When in compassing, assume all units are sharing the same GPS antennas and should therefore have
            # no mounting bias in heading
            mount_bias[:, 2] = 0

        # Adjust all attitude errors to the mean by the mounting bias
        # TODO: Talk to Walt about the mount bias - because this probably includes more biases than just the mount bias
        att_error -= mount_bias[:, None, :]

        if debug:
            colors = ['r', 'g', 'b', 'm']
            plt.figure()
            plt.subplot(3, 1, 1)  # Position
            plt.title("position error")
            for m in range(3):
                for n in range(numDev):
                    plt.plot(data[n, :, 0], data[n, :, m + 1], color=colors[m])
                plt.plot(data[0, :, 0],
                         means[:, m],
                         linewidth=2,
                         color=colors[m])
            plt.subplot(3, 1, 2)
            plt.title("velocity error")
            for m in range(3):
                for n in range(numDev):
                    plt.plot(data[n, :, 0], data[n, :, m + 4], color=colors[m])
                plt.plot(data[0, :, 0],
                         means[:, m + 3],
                         linewidth=2,
                         color=colors[m])
            plt.subplot(3, 1, 3)
            plt.title("attitude")
            for m in range(4):
                for n in range(numDev):
                    plt.plot(data[n, :, 0], data[n, :, m + 7], color=colors[m])
                plt.plot(data[0, :, 0],
                         means[:, m + 6],
                         linewidth=2,
                         color=colors[m])

            plt.figure()
            for m in range(3):
                plt.subplot(3, 1, m + 1)
                for n in range(numDev):
                    plt.plot(att_error[n, :, m])
            plt.show()

        # RMS = sqrt( (1/N) * sum(e^2) ), taken over time (axis=1)
        RMS = np.empty((numDev, 9))
        # Calculate RMS for position and velocity
        RMS[:, :6] = np.sqrt(
            np.mean(np.square(data[:, :, 1:7] - means[:, 0:6]), axis=1))
        # Calculate RMS for attitude
        RMS[:, 6:] = np.sqrt(np.mean(np.square(att_error[:, :, :]), axis=1))

        # Average RMS across devices
        averageRMS = np.mean(RMS, axis=0)

        print("average RMS = ", averageRMS)

        # Convert Attitude Error To Euler Angles
        RMS_euler = RMS[:, 6:]  # quat2eulerArray(qexp(RMS[:,6:]))
        averageRMS_euler = averageRMS[
            6:]  #quat2eulerArray(qexp(averageRMS[None,6:]))[0]
        mount_bias_euler = mount_bias  #quat2eulerArray(qexp(mount_bias))

        # Below is creating the RMS report
        thresholds = np.array([
            0.2,
            0.2,
            0.2,  # LLA
            0.2,
            0.2,
            0.2,  # UVW
            0.1,
            0.1,
            2.0
        ])  # ATT (rpy) - (deg)
        if navMode or compassing:
            thresholds[8] = 0.3  # Higher heading accuracy
        else:
            thresholds[:6] = np.inf

        thresholds[6:] *= DEG2RAD  # convert degrees threshold to radians

        specRatio = averageRMS / thresholds

        filename = os.path.join(directory, 'RMS_report_new.txt')
        f = open(filename, 'w')
        f.write('*****   Performance Analysis Report - %s   *****\n' %
                (subdir))
        f.write('\n')
        f.write('Directory: %s\n' % (directory))
        mode = "AHRS"
        if navMode: mode = "NAV"
        if compassing: mode = "DUAL GNSS"
        f.write("\n")

        # Print Table of RMS accuracies
        line = 'Device       '
        if navMode:
            f.write(
                '--------------------------------------------------- RMS Accuracy -------------------------------------------\n'
            )
            line = line + 'UVW[  (m/s)   (m/s)   (m/s) ],  NED[    (m)     (m)     (m) ],'
        else:  # AHRS mode
            f.write('-------------- RMS Accuracy --------------\n')
        line = line + ' Att [  (deg)   (deg)   (deg) ]\n'
        f.write(line)

        for n in range(0, numDev):
            devInfo = itd.cDevInfo(log.devices[n].data['devInfo'])
            line = '%2d SN%d      ' % (n, devInfo.v['serialNumber'][-1])
            if navMode:
                line = line + '[ %6.4f  %6.4f  %6.4f ],     ' % (
                    RMS[n, 3], RMS[n, 4], RMS[n, 5])
                line = line + '[ %6.4f  %6.4f  %6.4f ],     ' % (
                    RMS[n, 0], RMS[n, 1], RMS[n, 2])
            line = line + '[ %6.4f  %6.4f  %6.4f ]\n' % (
                RMS_euler[n, 0] * RAD2DEG, RMS_euler[n, 1] * RAD2DEG,
                RMS_euler[n, 2] * RAD2DEG)
            f.write(line)

        line = 'AVERAGE:        '
        if navMode:
            f.write(
                '------------------------------------------------------------------------------------------------------------\n'
            )
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (
                averageRMS[3], averageRMS[4], averageRMS[5])
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (
                averageRMS[0], averageRMS[1], averageRMS[2])
        else:  # AHRS mode
            f.write('------------------------------------------\n')
        line = line + '[%7.4f %7.4f %7.4f ]\n' % (
            averageRMS_euler[0] * RAD2DEG, averageRMS_euler[1] * RAD2DEG,
            averageRMS_euler[2] * RAD2DEG)
        f.write(line)

        line = 'THRESHOLD:      '
        if navMode:
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (
                thresholds[3], thresholds[4], thresholds[5])
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (
                thresholds[0], thresholds[1], thresholds[2])
        line = line + '[%7.4f %7.4f %7.4f ]\n' % (thresholds[6] * RAD2DEG,
                                                  thresholds[7] * RAD2DEG,
                                                  thresholds[8] * RAD2DEG)
        f.write(line)

        line = 'RATIO:          '
        if navMode:
            f.write(
                '------------------------------------------------------------------------------------------------------------\n'
            )
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (
                specRatio[3], specRatio[4], specRatio[5])
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (
                specRatio[0], specRatio[1], specRatio[2])
        else:  # AHRS mode
            f.write('------------------------------------------\n')
        line = line + '[%7.4f %7.4f %7.4f ]\n' % (specRatio[6], specRatio[7],
                                                  specRatio[8])
        f.write(line)

        def pass_fail(ratio):
            return 'FAIL' if ratio > 1.0 else 'PASS'

        line = 'PASS/FAIL:      '
        if navMode:
            line = line + '[   %s    %s    %s ],     ' % (pass_fail(
                specRatio[3]), pass_fail(specRatio[4]), pass_fail(specRatio[5])
                                                          )  # LLA
            line = line + '[   %s    %s    %s ],     ' % (pass_fail(
                specRatio[0]), pass_fail(specRatio[1]), pass_fail(specRatio[2])
                                                          )  # UVW
        line = line + '[   %s    %s    %s ]\n' % (pass_fail(
            specRatio[6]), pass_fail(specRatio[7]), pass_fail(specRatio[8])
                                                  )  # ATT
        f.write(line)

        if navMode:
            f.write(
                '                                                                                         '
            )
        else:  # AHRS mode
            f.write('                  ')
        f.write('(' + mode + ' mode)\n\n')

        # Print Mounting Biases
        f.write('--------------- Angular Mounting Biases ----------------\n')
        f.write('Device       Euler Biases[   (deg)     (deg)     (deg) ]\n')
        for n in range(0, numDev):
            devInfo = itd.cDevInfo(log.devices[n].data['devInfo'])
            f.write('%2d SN%d               [ %7.4f   %7.4f   %7.4f ]\n' %
                    (n, devInfo.v['serialNumber'][-1], mount_bias_euler[n, 0] *
                     RAD2DEG, mount_bias_euler[n, 1] * RAD2DEG,
                     mount_bias_euler[n, 2] * RAD2DEG))
        f.write('\n')

        # Print Device Version Information
        f.write(
            '------------------------------------------- Device Info -------------------------------------------------\n'
        )
        for n in range(0, numDev):
            devInfo = itd.cDevInfo(log.devices[n].data['devInfo'])
            hver = devInfo.v['hardwareVer'][-1]
            cver = devInfo.v['commVer'][-1]
            fver = devInfo.v['firmwareVer'][-1]
            buld = devInfo.v['build'][-1]
            repo = devInfo.v['repoRevision'][-1]
            date = devInfo.v['buildDate'][-1]
            time = devInfo.v['buildTime'][-1]
            addi = devInfo.v['addInfo'][-1]
            f.write(
                '%2d SN%d  HW: %d.%d.%d.%d   FW: %d.%d.%d.%d build %d repo %d   Proto: %d.%d.%d.%d  Date: %04d-%02d-%02d %02d:%02d:%02d  %s\n'
                % (n, devInfo.v['serialNumber'][-1], hver[3], hver[2], hver[1],
                   hver[0], fver[3], fver[2], fver[1], fver[0], buld, repo,
                   cver[3], cver[2], cver[1], cver[0], 2000 + date[2], date[1],
                   date[0], time[3], time[2], time[1], addi))
        f.write('\n')

        f.close()

        # Automatically open the report in a text editor
        if sys.platform.startswith('win'):  # 'win' in sys.platform would also match 'darwin'
            subprocess.Popen(["notepad.exe", filename])  # non-blocking call
        elif sys.platform.startswith('linux'):
            subprocess.Popen(['gedit', filename])

    print("Done.")

    # TODO: Pass out the union of the test errors
    return averageRMS
示例#47
0
    y += 0.7*cdf_1d_gaussian(x, mu=4, sigma=2)
    y *= 10
    y += 3
    
    range_start=y.min()
    range_end=y.max()
    
    # Add noise
    y += 0.4*np.random.standard_normal(y.shape)
    
    
    if 1:
        plt.figure(0)
        of.plt.set_figure_size_and_location(1000,0,1000,500)
        plt.clf()
        plt.subplot(121)
        plt.cla()
        plt.plot(x,y,'.',lw=3)
        plt.title('data')
        ax = plt.gca()
        ax.tick_params(axis='y', labelsize=50)
        ax.tick_params(axis='x', labelsize=30)
        
         
    
     

    
    nPtsDense = 10000
    mr = MonotonicRegression(base=[12],nLevels=4)    
    mr.set_dense(domain_start=-10,domain_end=10)                    
示例#48
0
            v_dense.gpu2cpu()
           
            if 0:
                plt.figure(17)
                for c,A in enumerate(As):        
                    _x = np.ones((2,100))
                    m=cpa_space.cells_verts[c,0,0]
                    M=cpa_space.cells_verts[c,1,0]
                    _x[0] = np.linspace(m,M,100)
                    _v = A.dot(_x).flatten()
                    plt.plot(_x[0],_v) 
                      
            if 1:
#                plt.figure()
                plt.figure(1);plt.clf()
                plt.subplot(231)                 
                plt.plot(interval,src.cpu)   
                plt.title('src')
                plt.subplot(232)         
#                plt.plot(interval[1:],np.diff(src)/(interval[1]-interval[0]))
                dx=interval[1]-interval[0]
                plt.plot(interval[1:],np.diff(src.cpu.ravel())/dx)
                plt.title(" d/dx src")  
                plt.ylim(0,.5)
                plt.subplot(233)
                plt.plot(np.linspace(cpa_space.XMINS[0],cpa_space.XMAXS[0],
                                     interval.size),v_dense.cpu.ravel())
                plt.ylim(-1,1)
                plt.title('velocity')           
                plt.subplot(234)
                plt.plot(interval,transformed.cpu)
示例#49
0
import viscojapan as vj
from pylab import plt

nth_epoch = 28
fault_file = '../../../fault_model/fault_bott80km.h5'

reader = vj.EpochalFileReader('slip0.h5')
epochs = reader.get_epochs()
slip = reader[epochs[nth_epoch]]
plt.subplot(1,2,1)
mplt = vj.plots.MapPlotFault(fault_file)
mplt.plot_slip(slip)

#############
fault_file = '../../../../iter0/fault_model/fault_bott120km.h5'
reader = vj.inv.ResultFileReader('nco_06_naslip_10.h5',fault_file)
slip = reader.get_incr_slip_at_nth_epoch(nth_epoch)

plt.subplot(1,2,2)
mplt = vj.plots.MapPlotFault(fault_file)
mplt.plot_slip(slip)
plt.show()

plt.close()

示例#50
0
            input()  # pause until Enter is pressed

            hx, hy = my_dict['history_x'], my_dict['history_y']

            lines_shape = (18, 512)
            # The initial points
            lines_old_x = hx[0].reshape(lines_shape).copy()
            lines_old_y = hy[0].reshape(lines_shape).copy()
            # The final points
            lines_new_x = hx[-1].reshape(lines_shape).copy()
            lines_new_y = hy[-1].reshape(lines_shape).copy()

            c = 'r'
            fig = plt.figure()
            plt.subplot(121)
            for line_x, line_y in zip(lines_old_x, lines_old_y):
                plt.plot(line_x, line_y, c)
                plt.axis('scaled')
                q = 100
                plt.xlim(0 - q, 512 + q)
                plt.ylim(0 - q, 512 + q)
                plt.gca().invert_yaxis()
            c = 'b'
            plt.subplot(122)
            for line_x, line_y in zip(lines_new_x, lines_new_y):
                plt.plot(line_x, line_y, c)
                plt.axis('scaled')
                q = 500
                plt.xlim(0 - q, 512 + q)
                plt.ylim(0 - q, 512 + q)
示例#51
0
File: plotting.py  Project: 0x0all/rep
    def _plot(self):
        for i, plotter in enumerate(self.plots):
            plt.subplot(len(self.plots), 1, i + 1)
            plotter.plot(fontsize=self.fontsize_, show_legend=self.show_legend_)
示例#52
0
        
        velTess /=  nC/10
        
    else:
        tw.sample_gaussian_velTess(level=0,Avees=cpa_space.Avees,velTess=velTess,mu=None)
     


        
    tw.update_pat_from_velTess(velTess,level=0)
    tw.calc_v(level=0)
    tw.v_dense.gpu2cpu()

    
    plt.clf()
    plt.subplot(221)
    plt.plot(tw.x_dense.cpu.ravel(),tw.v_dense.cpu.ravel(),'b-')
    plt.grid('on')
    plt.title(r'$v(x)$')

    
    plt.plot(x_tess,velTess,'ro')
    
    
   
#    pts_fwd_cf =  closed_form_int.calc_phi(0.6-0.0001,velTess=velTess,t=1.0)    
#     
#    print
#    print 'pts_fwd_cf',pts_fwd_cf
#    
示例#53
0
   
        'pert_scale==@best_svd_pert_scale & n_svs==@best_svd_n_svs & svd_leadtime==@best_svd_svd_leadtime'
    )

    # drop
    sub = df_drop.query('leadtime==@selection_leadtime & n_ens==@n_ens')
    best_drop_sub = sub.iloc[opt_func(sub[optimization_var + '_drop'].values)]
    best_drop_rate = best_drop_sub['drop_rate']
    best_drop = df_drop.query('drop_rate==@best_drop_rate')

    sub_svd = best_svd.query('n_ens==@n_ens')
    sub_rand = best_rand.query('n_ens==@n_ens')
    sub_drop = best_drop.query('n_ens==@n_ens')

    sub_netens = df_netens.query('n_ens==20')
    #plt.figure(figsize=figsize)
    plt.subplot(3, 3, 1 + iplot)
    plt.plot(sub_svd['leadtime'],
             sub_svd['rmse_ensmean_svd'],
             label='svd',
             color=colors[0])
    plt.plot(sub_rand['leadtime'],
             sub_rand['rmse_ensmean_rand'],
             label='rand',
             color=colors[1],
             zorder=1)
    plt.plot(sub_drop['leadtime'],
             sub_drop['rmse_ensmean_drop'],
             label='drop',
             color=colors[2])
    plt.plot(sub_netens['leadtime'],
             sub_netens['rmse_ensmean_netens'],
示例#54
0
def link_level_hour(levels, usages, quantiles, scheme, direction, color, nnames, lnames, admat=None):
    """
    Make a color mesh of a node's average hourly usage of links at different
    levels.
    """
    if admat is None:
        admat = np.genfromtxt('./settings/eadmat.txt')
    if color == 'solar':
        cmap = Oranges_cmap
    elif color == 'wind':
        cmap = Blues_cmap
    elif color == 'backup':
        cmap = 'Greys'
    links, nodes, lapse = usages.shape
    usages = np.reshape(usages, (links, nodes, lapse // 24, 24))
    totalHour = np.zeros((levels, 24))
    totalNormed = np.zeros((levels, 24))
    for node in range(nodes):
        nl = neighbor_levels(node, levels, admat)
        hourSums = np.zeros((levels, 24))
        for lvl in range(levels):
            ll = link_level(nl, lvl, nnames, lnames)
            ll = np.array(ll, dtype='int')
            meanSum = np.sum(np.mean(usages[ll, node], axis=1), axis=0)
            linkSum = sum(quantiles[ll])
            hourSums[lvl] = meanSum / linkSum
        totalHour += hourSums

        plt.figure(figsize=(9, 3))
        ax = plt.subplot()
        plt.pcolormesh(hourSums, cmap=cmap)
        plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
        ax.set_yticks(np.linspace(.5, levels - .5, levels))
        ax.set_yticklabels(range(1, levels + 1))
        ax.yaxis.set_tick_params(width=0)
        ax.xaxis.set_tick_params(width=0)
        ax.set_xticks(np.linspace(.5, 23.5, 24))
        ax.set_xticklabels(np.array(np.linspace(1, 24, 24), dtype='int'), ha="center", va="top", fontsize=10)
        plt.ylabel('Link level')
        plt.axis([0, 24, 0, levels])
        plt.title(nnames[node] + ' ' + direction + ' ' + color)
        plt.savefig(figPath + '/hourly/' + str(scheme) + '/' + str(node) + '_' + color + '_' + direction + '.pdf', bbox_inches='tight')
        plt.close()

        hourSums = hourSums / np.sum(hourSums, axis=1)[:, None]
        totalNormed += hourSums
        plt.figure(figsize=(9, 3))
        ax = plt.subplot()
        plt.pcolormesh(hourSums, cmap=cmap)
        plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
        ax.set_yticks(np.linspace(.5, levels - .5, levels))
        ax.set_yticklabels(range(1, levels + 1))
        ax.yaxis.set_tick_params(width=0)
        ax.xaxis.set_tick_params(width=0)
        ax.set_xticks(np.linspace(.5, 23.5, 24))
        ax.set_xticklabels(np.array(np.linspace(1, 24, 24), dtype='int'), ha="center", va="top", fontsize=10)
        plt.ylabel('Link level')
        plt.axis([0, 24, 0, levels])
        plt.title(nnames[node] + ' ' + direction + ' ' + color)
        plt.savefig(figPath + '/hourly/' + str(scheme) + '/normed/' + str(node) + '_' + color + '_' + direction + '.pdf', bbox_inches='tight')
        plt.close()

    # Plot average hourly usage
    totalHour /= nodes
    plt.figure(figsize=(9, 3))
    ax = plt.subplot()
    plt.pcolormesh(totalHour, cmap=cmap)
    plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
    ax.set_yticks(np.linspace(.5, levels - .5, levels))
    ax.set_yticklabels(range(1, levels + 1))
    ax.yaxis.set_tick_params(width=0)
    ax.xaxis.set_tick_params(width=0)
    ax.set_xticks(np.linspace(.5, 23.5, 24))
    ax.set_xticklabels(np.array(np.linspace(1, 24, 24), dtype='int'), ha="center", va="top", fontsize=10)
    plt.ylabel('Link level')
    plt.axis([0, 24, 0, levels])
    plt.savefig(figPath + '/hourly/' + str(scheme) + '/total_' + color + '_' + direction + '.pdf', bbox_inches='tight')
    plt.close()

    totalNormed /= nodes
    plt.figure(figsize=(9, 3))
    ax = plt.subplot()
    plt.pcolormesh(totalNormed, cmap=cmap)
    plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
    ax.set_yticks(np.linspace(.5, levels - .5, levels))
    ax.set_yticklabels(range(1, levels + 1))
    ax.yaxis.set_tick_params(width=0)
    ax.xaxis.set_tick_params(width=0)
    ax.set_xticks(np.linspace(.5, 23.5, 24))
    ax.set_xticklabels(np.array(np.linspace(1, 24, 24), dtype='int'), ha="center", va="top", fontsize=10)
    plt.ylabel('Link level')
    plt.axis([0, 24, 0, levels])
    plt.savefig(figPath + '/hourly/' + str(scheme) + '/normed/total_' + color + '_' + direction + '.pdf', bbox_inches='tight')
    plt.close()
示例#55
0
## begin plotting


def axhspan_for_viscosity(ax):
    deps = [0, -51, -220, -670, -2000]
    cols = ['.95', '0.83', '0.7', '0.5']
    alpha = .8
    for dep1, dep2, col in zip(deps[0:], deps[1:], cols):
        ax.axhspan(dep2, dep1, color=col, alpha=alpha)


gs = gridspec.GridSpec(1, 3, width_ratios=[1.5, 1.7, 1.1])

fig = plt.gcf()
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1], sharey=ax1)
ax3 = plt.subplot(gs[2], sharey=ax1)

# plot ax1 - Maxwell viscosity
axhspan_for_viscosity(ax1)
ax1.tick_params(axis='x', bottom='off', top='off', labelbottom='off')

ax1.text(0.2, -40, s='crust  $H_e=?$', fontsize=12)
ax1.text(0.2, -170, s='asthenosphere \n$ \\eta = ?$', fontsize=12)
ax1.text(0.2,
         -450,
         s='upper mantle \n$ \\eta = 1 \\times 10^{20}$',
         fontsize=12)
ax1.text(0.2,
         -850,
示例#56
0
    def disp(self,sampler,ds_quiver=None):
        
        level=sampler.level    
        theta=sampler.theta_current
        tw=self.tw
        scale_quiver=self.scale_quiver
        if ds_quiver is None:
            ds_quiver = min([tw.nCols, tw.nRows]) // 32
        
        markersize = 4
        fontsize=30
        cpa_space = tw.ms.L_cpa_space[level]            
        plt.subplot(231)
        sampler.plot_ll()
        plt.title('ll',fontsize=fontsize)
        sampler.plot_wlp()
        sampler.plot_wlp_plus_ll()
        
        plt.subplot(232)
        sampler.plot_ar()
        plt.title('accept ratio',fontsize=fontsize)
         
         
        cpa_space.theta2As(theta)
        tw.update_pat_from_Avees(level=level)          
        tw.calc_v(level=level)    
        tw.v_dense.gpu2cpu()     
    
        src = self.src
        dst = self.dst
        transformed = self.transformed
        
        src_dense=self.src_dense
        transformed_dense=self.transformed_dense
        tw.calc_T_fwd(src_dense, transformed_dense,level=level,int_quality=0)    
        
        transformed_dense.gpu2cpu()        


    
    
        cpa_space.theta2As(theta)
        tw.update_pat_from_Avees(level=level)          
        tw.calc_v(level=level)    
        tw.v_dense.gpu2cpu()     
        transformed.gpu2cpu()
        
        
        
        plt.subplot(233)
        
#        class TF:
#            use_hand_data   =False
        if self.kind == 'landmarks' and self.landmarks_are_lin_ordered:
            lin_order=1
        else:
            lin_order=0
        if not lin_order:
    #        plt.plot(src.cpu[:,0],src.cpu[:,1],'go',ms=markersize)
            plt.plot(transformed.cpu[:,0],transformed.cpu[:,1],'ro',ms=markersize)
            plt.plot(dst.cpu[:,0],dst.cpu[:,1],'bo',ms=markersize)
            
            tw.config_plt(axis_on_or_off='on')
        
        else:
    #        plt.plot(src.cpu[:,0],src.cpu[:,1],'g-o',ms=markersize)
            plt.plot(transformed.cpu[:,0],transformed.cpu[:,1],'r-o',ms=markersize) 
            plt.plot(dst.cpu[:,0],dst.cpu[:,1],'b-o',ms=markersize)
               
            tw.config_plt(axis_on_or_off='on')
            
        
        plt.subplot(234)
        
        tw.quiver(scale=scale_quiver,ds=ds_quiver)
#        1/0
#        cpa_space.plot_cells()
        
#        if TF.use_hand_data == False:
#            cpa_space_gt.theta2As(theta_gt)
#            tw.update_pat(level=level_gt)          
#            tw.calc_v(level=level_gt)
#            tw.v_dense.gpu2cpu() 
        
        if lin_order:
            plt.plot(src.cpu[:,0],src.cpu[:,1],'g-o',ms=markersize)
            plt.plot(dst.cpu[:,0],dst.cpu[:,1],'b-o',ms=markersize)
    #        plt.plot(transformed.cpu[:,0],transformed.cpu[:,1],'r-o',ms=markersize) 
        tw.config_plt(axis_on_or_off='on')
    
        
        if not lin_order:
            plt.subplot(234)
            tw.quiver(scale=scale_quiver)
            cpa_space.plot_cells()
            tw.config_plt(axis_on_or_off='on')
            plt.title(r'$v^\theta$',
                       fontsize=20)
    
        else:
            plt.subplot(235)
            tw.imshow_vx()
            plt.title(r'$v^\theta_{\mathrm{horizontal}}$',
                      fontsize=20)

            cpa_space.plot_cells()
            tw.config_plt(axis_on_or_off='on')
            plt.subplot(236)
            tw.imshow_vy()
            plt.title(r'$v^\theta_{\mathrm{vertical}}$',
                       fontsize=20)
            cpa_space.plot_cells()
            tw.config_plt(axis_on_or_off='on')
     
        
        if self.kind == 'landmarks' and self.landmarks_are_lin_ordered:
            plt.subplot(233)
            plt.legend([r'$T^\theta(\mathrm{src})$',r'$\mathrm{dst}$'],loc='lower right',
                        fontsize=20)


            plt.subplot(234)
            plt.legend([r'$\mathrm{src}$',r'$\mathrm{dst}$'],loc='lower right',
                        fontsize=20)
示例#57
0
def example(tess='I',
            base=[2, 2, 2],
            nLevels=1,
            zero_v_across_bdry=[True] * 3,
            vol_preserve=False,
            nRows=100,
            nCols=100,
            nSlices=100,
            use_mayavi=False,
            eval_v=False,
            eval_cell_idx=False):

    tw = TransformWrapper(nRows=nRows,
                          nCols=nCols,
                          nSlices=nSlices,
                          nLevels=nLevels,
                          base=base,
                          zero_v_across_bdry=zero_v_across_bdry,
                          tess=tess,
                          valid_outside=False,
                          only_local=False,
                          vol_preserve=vol_preserve)

    print_iterable(tw.ms.L_cpa_space)
    print(tw)

    # create some fake 3D image.
    img = np.zeros((nCols, nRows, nSlices), dtype=np.float64)

    #    img[:]=np.random.random_integers(0,255,img.shape)

    # Fill the image with the x coordinates as fake values
    img[:] = tw.pts_src_dense.cpu[:, 0].reshape(img.shape)

    img0 = CpuGpuArray(img.copy().astype(np.float64))
    img_wrapped_fwd = CpuGpuArray.zeros_like(img0)
    img_wrapped_inv = CpuGpuArray.zeros_like(img0)

    seed = 0
    np.random.seed(seed)

    ms_Avees = tw.get_zeros_PA_all_levels()
    ms_theta = tw.get_zeros_theta_all_levels()

    if tess == 'II':
        for level in range(tw.ms.nLevels):
            cpa_space = tw.ms.L_cpa_space[level]
            Avees = ms_Avees[level]
            #            1/0
            if level == 0:
                tw.sample_gaussian(level,
                                   ms_Avees[level],
                                   ms_theta[level],
                                   mu=None)  # zero mean
                #                ms_theta[level].fill(0)
                #                ms_theta[level][-4]=10
                cpa_space.theta2Avees(theta=ms_theta[level], Avees=Avees)
            else:
                tw.sample_from_the_ms_prior_coarse2fine_one_level(
                    ms_Avees, ms_theta, level_fine=level)
    else:
        # For tess='I' in 3D, I have yet to implement the coarse-to-fine sampling.
        for level in range(tw.ms.nLevels):
            cpa_space = tw.ms.L_cpa_space[level]
            velTess = cpa_space.zeros_velTess()
            ms_Avees[level].fill(0)
            Avees = ms_Avees[level]
            tw.sample_gaussian_velTess(level, Avees, velTess, mu=None)

    print('img shape:', img0.shape)

    # You don't have to use these. You can use any 2d array
    # that has 3 columns (regardless of the number of rows); see the sketch below.
    pts_src = tw.pts_src_dense
    pts_src = CpuGpuArray(pts_src.cpu[::1].copy())

    # Create buffers for the output
    pts_fwd = CpuGpuArray.zeros_like(pts_src)
    pts_inv = CpuGpuArray.zeros_like(pts_src)
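    # As in the 2D example above, a custom point set is also fine (a
    # hypothetical sketch; 'my_pts3d' is illustrative): any (N, 3) float array
    # can be wrapped in a CpuGpuArray and used instead of pts_src.
    # my_pts3d = np.random.rand(200, 3) * [nCols, nRows, nSlices]
    # pts_src = CpuGpuArray(my_pts3d.astype(np.float64))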

    for level in range(tw.ms.nLevels):
        tw.update_pat_from_Avees(ms_Avees[level], level)

        if eval_v:
            # Evaluate the velocity field.
            # You don't have to do this unless you want to visualize v.
            # (When evaluating the transformation, v will be evaluated
            #  internally anyway -- but its result won't be stored.)
            tw.calc_v(level=level)

        print('level', level)
        print()
        print('number of points:', len(pts_src))
        print('number of cells:', tw.ms.L_cpa_space[level].nC)

        # optional, if you want to time it
        timer_gpu_T_fwd = GpuTimer()

        # Simply calling
        #   tic = time.clock()
        # and then
        #   toc = time.clock()
        # won't work:
        # most likely you will get that toc-tic is zero.
        # You need to use the GpuTimer object. When you do that,
        # one side effect is that suddenly the toc-tic from above will
        # give you a more realistic result.

        tic = time.clock()
        timer_gpu_T_fwd.tic()
        tw.calc_T_fwd(pts_src, pts_fwd, level=level)
        timer_gpu_T_fwd.toc()
        toc = time.clock()

        print('Time, in sec, for computing T_fwd:')
        print(timer_gpu_T_fwd.secs)
        print(toc - tic)  # likely to be 0, unless you also used the GpuTimer.

        # You can also time the inv of course. Results will be similar.
        tw.calc_T_inv(pts_src, pts_inv, level=level)

        if eval_cell_idx:
            # cell_idx is computed here just for display.
            cell_idx = CpuGpuArray.zeros(len(pts_src), dtype=np.int32)
            tw.calc_cell_idx(pts_src, cell_idx, level)

        tw.remap_fwd(pts_inv, img0, img_wrapped_fwd)
        tw.remap_inv(pts_fwd, img0, img_wrapped_inv)

        # For display purposes, do gpu2cpu transfer
        print "For display purposes, do gpu2cpu transfer"

        if eval_cell_idx:
            cell_idx.gpu2cpu()
        if eval_v:
            tw.v_dense.gpu2cpu()
        pts_fwd.gpu2cpu()
        pts_inv.gpu2cpu()
        img_wrapped_fwd.gpu2cpu()
        img_wrapped_inv.gpu2cpu()

        if use_mayavi:
            ds = 1  # downsampling factor
            i = 17
            pts_src_grid = pts_src.cpu.reshape(tw.nRows, tw.nCols, -1, 3)
            pts_src_ds = pts_src_grid[::ds, ::ds, i].reshape(-1, 3)
            pts_fwd_grid = pts_fwd.cpu.reshape(tw.nRows, tw.nCols, -1, 3)
            pts_fwd_ds = pts_fwd_grid[::ds, ::ds, i].reshape(-1, 3)
            pts_inv_grid = pts_inv.cpu.reshape(tw.nRows, tw.nCols, -1, 3)
            pts_inv_ds = pts_inv_grid[::ds, ::ds, i].reshape(-1, 3)

            from of.my_mayavi import *
            mayavi_mlab_close_all()
            mayavi_mlab_figure_bgwhite('src')
            x, y, z = pts_src_ds.T
            mayavi_mlab_plot3d(x, y, z)
            mayavi_mlab_figure_bgwhite('fwd')
            x, y, z = pts_fwd_ds.T
            mayavi_mlab_plot3d(x, y, z)

        figsize = (12, 12)
        plt.figure(figsize=figsize)
        i = 17  # some slice
        plt.subplot(131)
        plt.imshow(img0.cpu[:, :, i].astype(np.uint8), interpolation="nearest")
        plt.title('slice from img')
        plt.subplot(132)
        plt.imshow(img_wrapped_fwd.cpu[:, :, i].astype(np.uint8),
                   interpolation="nearest")
        plt.axis('off')
        plt.title('slice from fwd(img)')
        plt.subplot(133)
        plt.imshow(img_wrapped_inv.cpu[:, :, i].astype(np.uint8),
                   interpolation="nearest")
        plt.axis('off')
        plt.title('slice from inv(img)')

    if 0:  # debug

        cpa_space = tw.ms.L_cpa_space[level]
        if eval_v:
            vx = tw.v_dense.cpu[:, 0].reshape(
                cpa_space.x_dense_grid_img.shape[1:])
            vy = tw.v_dense.cpu[:, 1].reshape(
                cpa_space.x_dense_grid_img.shape[1:])
            vz = tw.v_dense.cpu[:, 2].reshape(
                cpa_space.x_dense_grid_img.shape[1:])

            plt.figure()
            plt.imshow(vz[:, :, 17], interpolation="nearest")
            plt.colorbar()
            plt.title('vz in some slice')

    return tw
示例#58
0
    def disp(self,sampler,interp_type_during_visualization):
        level=sampler.level    
        theta=sampler.theta_current
        tw=self.tw
#        interval=self.interval
#        interval_dense=self.interval_dense
        markersize = 5
        fontsize=30
        cpa_space = tw.ms.L_cpa_space[level]            
        plt.subplot(231)
        sampler.plot_ll()
        plt.title('ll',fontsize=fontsize)
        sampler.plot_wlp()
        sampler.plot_wlp_plus_ll()
        if sampler.lp_func:         
            plt.legend(['ll','wlp','ll+wlp'])
        
        plt.subplot(232)
        sampler.plot_ar()
        plt.title('accept ratio',fontsize=fontsize)
         
#        print theta
        cpa_space.theta2As(theta=theta)
        tw.update_pat_from_Avees(level=level)          
        tw.calc_v(level=level)    
        tw.v_dense.gpu2cpu()     
    
        src = self.src
#        dst = self.dst
        transformed = self.transformed
        
#        src_dense=self.src_dense
#        transformed_dense=self.transformed_dense
#        tw.calc_T(src_dense, transformed_dense, mysign=1, level=level, 
#        
#        transformed_dense.gpu2cpu()

        tw.calc_T_inv(src, transformed,  level=level, 
                  int_quality=+1)            
        transformed.gpu2cpu()
        
        if interp_type_during_visualization=='gpu_linear':
            my_dtype = np.float64
        else:
            my_dtype = np.float32 # For opencv
        
        img_src = self.signal.src.cpu.reshape(tw.nRows,tw.nCols)
        img_src = CpuGpuArray(img_src.astype(my_dtype))  
        img_wrapped = CpuGpuArray.zeros_like(img_src)

        img_dst = self.signal.dst.cpu.reshape(tw.nRows,tw.nCols)
        img_dst = CpuGpuArray(img_dst)         
        
                
        if interp_type_during_visualization=='gpu_linear':
            tw.remap_fwd(transformed,img_src,img_wrapped)
        else:
            tw.remap_fwd_opencv(transformed,img_src,img_wrapped,interp_type_during_visualization)
        img_wrapped.gpu2cpu()
             
        plt.subplot(233)   
        plt.imshow(img_src.cpu, interpolation="none")
        plt.gray()
        cpa_space.plot_cells('r')
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$I_{\mathrm{src}}$')

        
                
        
        plt.subplot(234)   
        plt.imshow(img_wrapped.cpu, interpolation="none")
        plt.gray()
#        cpa_space.plot_cells('w')
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$I_{\mathrm{src}}\circ T^{\theta}$')
        
        plt.subplot(235)   
        plt.imshow(img_dst.cpu, interpolation="none")
        plt.gray()
        plt.title(r'$I_{\mathrm{dst}}$')
        
#        cpa_space.plot_cells('w')
        tw.config_plt(axis_on_or_off='on')
        
        plt.subplot(2,6,11)
        self.tw.imshow_vx()
        pylab.jet()
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$v_x$')
        plt.subplot(2,6,12)
        self.tw.imshow_vy()
        pylab.jet()
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$v_y$')
示例#59
0
fault_file = '../../fault_model/fault_bott60km.h5'
earth_file = '../../earth_model_nongravity/He50km_VisM6.3E18/earth.model_He50km_VisM6.3E18'

compute = vj.MomentCalculator(fault_file, earth_file)
epochs = reader.epochs
mos = []
mws = []


for nth, epoch in enumerate(epochs):
    aslip = reader.get_total_slip_at_nth_epoch(nth)
    mo, mw = compute.compute_moment(aslip)
    mos.append(mo)
    mws.append(mw)

plt.subplot(211)
plt.plot(epochs, mos)
plt.subplot(212)
plt.plot(epochs, mws)
plt.savefig('total_slip_mo_mw_evolution.png')
plt.close()


mos = []
mws = []
for nth, epoch in enumerate(epochs):
    if nth == 0:
        continue
    aslip = reader.get_after_slip_at_nth_epoch(nth)
    mo, mw = compute.compute_moment(aslip)
    mos.append(mo)
示例#60
0
vises = []
Hes = []
for f in files:
    nth_epochs = int(f.split('_')[-5])
    print(nth_epochs)
    reader = vj.inv.ResultFileReader(f)
    log_vis = reader.get_nlin_par_solved_value('log10(visM)')
    log_He = reader.get_nlin_par_solved_value('log10(He)')
    vis = 10**log_vis
    He = 10**log_He
    
    vises.append(vis)
    Hes.append(He)

max_time = [max(epochs) for epochs in epochs_list]

ax1 = plt.subplot(211)
plt.plot(max_time, vises, 'x-')
plt.grid('on')
plt.ylabel(r'viscosity $(Pa \cdot s)$')
plt.setp(ax1.get_xticklabels(), visible=False)

plt.subplot(212, sharex=ax1)
plt.plot(max_time, Hes, '^-')
plt.ylabel(r'He (km)')
plt.grid('on')
plt.xlabel('days of data used')

plt.savefig('diff_days_span.png')
plt.show()