Example #1
def plot_part2(filename):
	"""
	Plots the result of count ones test
	"""
	fig1 = pl.figure()
	iterations, runtimes, fvals = extract(filename)
	algos = ["SA", "GA", "MIMIC"]
	iters_sa, iters_ga, iters_mimic = [np.array(iterations[a]) for a in algos]
	runtime_sa, runtime_ga, runtime_mimic = [np.array(runtimes[a]) for a in algos]
	fvals_sa, fvals_ga, fvals_mimic = [np.array(fvals[a]) for a in algos]

	plotfunc = pl.loglog
	plotfunc(runtime_sa, fvals_sa, "bs", mew=0)
	plotfunc(runtime_ga, fvals_ga, "gs", mew=0)
	plotfunc(runtime_mimic, fvals_mimic, "rs", mew=0)

	# plotfunc(iters_sa, fvals_sa/(runtime_sa * iters_sa), "bs", mew=0)
	# plotfunc(iters_ga, fvals_ga/(runtime_ga * iters_ga), "gs", mew=0)
	# plotfunc(iters_mimic, fvals_mimic/(runtime_mimic * iters_mimic), "rs", mew=0)

	pl.xlabel("Runtime (seconds)")
	pl.ylabel("Objective function value")
	pl.ylim([min(fvals_sa) / 2, max(fvals_mimic) * 2])
	pl.legend(["SA", "GA", "MIMIC"], loc=4)

	pl.savefig(filename.replace(".csv", ".png"), bbox_inches="tight") 
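# A hypothetical call, assuming module-level `import numpy as np`,
# `import matplotlib.pylab as pl`, and an extract() helper that returns
# iteration/runtime/fitness dicts keyed by "SA"/"GA"/"MIMIC":
# plot_part2("count_ones.csv")   # saves count_ones.png alongside the CSV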
Example #2
    def plot_field(self,x,y,u=None,v=None,F=None,contour=False,outdir=None,plot='quiver',figname='_field',format='eps'):
        outdir = self.set_dir(outdir)
        p = 64
        if F is None: F=self.calc_F(u,v)

        plt.close('all')
        plt.figure()
        #fig, axes = plt.subplots(nrows=1)
        if contour:
            # plt.hold() was removed in matplotlib 3.0; overplotting is now the default
            plt.contourf(x,y,F)
        if plot=='quiver':
            plt.quiver(x[::p],y[::p],u[::p],v[::p],scale=0.1)
        
        if plot=='pcolor':
            plt.pcolormesh(x[::4],y[::4],F[::4],cmap=plt.cm.Pastel1)
            plt.colorbar()

        if plot=='stream':
            speed = F[::16]
            plt.streamplot(x[::16], y[::16], u[::16], v[::16], density=(1,1),color='k')

        plt.xlabel('$x$ (a.u.)')
        plt.ylabel('$y$ (a.u.)')
        
        plt.savefig(os.path.join(outdir,figname+'.'+format),format=format,dpi=320,bbox_inches='tight')
        plt.close()
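# A hypothetical call on an instance `sim` of this class, using a toy rotational
# field (assumes `import numpy as np`):
# x, y = np.meshgrid(np.linspace(-1, 1, 256), np.linspace(-1, 1, 256))
# sim.plot_field(x, y, u=-y, v=x, plot='quiver', figname='toy_field', format='png')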
Example #3
def plot_feat_hist(data_name_list, filename=None):
    if len(data_name_list) > 1:
        assert filename is not None

    num_rows = 1 + (len(data_name_list) - 1) // 2  # integer division for Python 3
    num_cols = 1 if len(data_name_list) == 1 else 2
    pylab.figure(figsize=(5 * num_cols, 4 * num_rows))

    for i in range(num_rows):
        for j in range(num_cols):
            pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
            x, name = data_name_list[i * num_cols + j]
            pylab.title(name)
            pylab.xlabel('Value')
            pylab.ylabel('Fraction')
            # the histogram of the data
            max_val = np.max(x)
            if max_val <= 1.0:
                bins = 50
            elif max_val > 50:
                bins = 50
            else:
                bins = max_val
            n, bins, patches = pylab.hist(
                x, bins=bins, density=True,  # 'normed' was removed from matplotlib
                facecolor='blue', alpha=0.75)

            pylab.grid(True)

    if not filename:
        filename = "feat_hist_%s.png" % name.replace(" ", "_")

    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
Example #4
    def check_models(self):
        plt.figure('Bandgap narrowing')
        Na = np.logspace(12, 20)
        Nd = 0.
        dn = 1e14
        temp = 300.

        for author in self.available_models():
            BGN = self.update(Na=Na, Nd=Nd, nxc=dn,
                              author=author,
                              temp=temp)

            if not np.all(BGN == 0):
                plt.plot(Na, BGN, label=author)

        test_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'Si', 'check data', 'Bgn.csv')

        data = np.genfromtxt(test_file, delimiter=',', names=True)

        for name in data.dtype.names[1:]:
            plt.plot(
                data['N'], data[name], 'r--',
                label='PV-lighthouse\'s: ' + name)

        plt.semilogx()
        plt.xlabel('Doping (cm$^{-3}$)')
        plt.ylabel('Bandgap narrowing (eV)')

        plt.legend(loc=0)
Example #5
 def plot_bernoulli_matrix(self, show_npfs=False):
   """
   Plot the heatmap of the Bernoulli matrix 
   @self
   @show_npfs - Highlight NPFS detections [Boolean] 
   """
   matrix = self.Bernoulli_matrix
   if not show_npfs:
     plot = plt.imshow(matrix)
     plot.set_cmap('hot')
     plt.colorbar()
     plt.xlabel("Bootstraps")
     plt.ylabel("Feature")
     plt.show()
   else:
     for i in self.selected_features:
       for k in range(len(matrix[i])):
         matrix[i,k] = .5
     plot = plt.imshow(matrix)
     plot.set_cmap('hot')
     plt.xlabel("Bootstraps")
     plt.ylabel("Feature")
     plt.colorbar()
     plt.show()
   return None
Example #6
def study_sdss_density(hemi='south'):
    grid = grid3d(hemi=hemi)
    n_data = num_sdss_data_both_catalogs(hemi, grid)
    n_rand, weight = num_sdss_rand_both_catalogs(hemi, grid)
    n_rand *= ((n_data*weight).sum() / (n_rand*weight).sum())
    delta = (n_data - n_rand) / n_rand
    delta[weight==0]=0.
    fdelta = np.fft.fftn(delta*weight)
    power = np.abs(fdelta)**2.
    ks = get_wavenumbers(delta.shape, grid.reso_mpc)
    kmag = ks[3]
    kbin = np.arange(0,0.06,0.002)
    ind = np.digitize(kmag.ravel(), kbin)
    power_ravel = power.ravel()
    power_bin = np.zeros_like(kbin)
    for i in range(len(kbin)):
        print(i)
        wh = np.where(ind==i)[0]
        power_bin[i] = power_ravel[wh].mean()
    #pl.clf()
    #pl.plot(kbin, power_bin)
    from cosmolopy import perturbation
    pk = perturbation.power_spectrum(kbin, 0.4, **cosmo)
    pl.clf(); pl.plot(kbin, power_bin/pk, 'b')
    pl.plot(kbin, power_bin/pk, 'bo')    
    pl.xlabel('k (1/Mpc)',fontsize=16)
    pl.ylabel('P(k) ratio, DATA/THEORY [arb. norm.]',fontsize=16)
    ipdb.set_trace()
Example #7
def make_corr1d_fig(dosave=False):
    corr = make_corr_both_hemi()
    lw=2; fs=16
    pl.figure(1)#, figsize=(8, 7))
    pl.clf()
    pl.xlim(4,300)
    pl.ylim(-400,+500)    
    lambda_titles = [r'$20 < \lambda < 30$',
                     r'$30 < \lambda < 40$',
                     r'$\lambda > 40$']
    colors = ['blue','green','red']
    for i in range(3):
        corr1d, rcen = corr_1d_from_2d(corr[i])
        ipdb.set_trace()
        pl.semilogx(rcen, corr1d*rcen**2, lw=lw, color=colors[i])
        #pl.semilogx(rcen, corr1d*rcen**2, 'o', lw=lw, color=colors[i])
    pl.xlabel(r'$s (Mpc)$',fontsize=fs)
    pl.ylabel(r'$s^2 \xi_0(s)$', fontsize=fs)    
    pl.legend(lambda_titles, loc='lower left', fontsize=fs+3)
    pl.plot([.1,10000],[0,0],'k--')
    s_bao = 149.28
    pl.plot([s_bao, s_bao],[-9e9,+9e9],'k--')
    pl.text(s_bao*1.03, 420, 'BAO scale')
    pl.text(s_bao*1.03, 370, '%0.1f Mpc'%s_bao)
    if dosave: pl.savefig('xi1d_3bin.pdf')
Example #8
def study_redmapper_2d():
    # I just want to know the typical angular separation for RM clusters.
    # I'm going to do this in a lazy way.
    hemi = 'north'
    rm = load_redmapper(hemi=hemi)
    ra = rm['ra']
    dec = rm['dec']
    ncl = len(ra)
    dist = np.zeros((ncl, ncl))
    for i in range(ncl):
        this_ra = ra[i]
        this_dec = dec[i]
        dra = this_ra-ra
        ddec = this_dec-dec
        dxdec = dra*np.cos(this_dec*np.pi/180.)
        dd = np.sqrt(dxdec**2. + ddec**2.)
        dist[i,:] = dd
        dist[i,i] = 99999999.
    d_near_arcmin = dist.min(0)*60.
    pl.clf(); pl.hist(d_near_arcmin, bins=100)
    pl.title('Distance to Nearest Neighbor for RM clusters')
    pl.xlabel('Distance (arcmin)')
    pl.ylabel('N')
    fwhm_planck_217 = 5.5 # arcmin
    sigma = fwhm_planck_217/2.355
    frac_2sigma = 1.*len(np.where(d_near_arcmin>2.*sigma)[0])/len(d_near_arcmin)
    frac_3sigma = 1.*len(np.where(d_near_arcmin>3.*sigma)[0])/len(d_near_arcmin)
    print('%0.3f percent of RM clusters are separated by 2-sigma_planck_beam' % (100.*frac_2sigma))
    print('%0.3f percent of RM clusters are separated by 3-sigma_planck_beam' % (100.*frac_3sigma))
    ipdb.set_trace()
Example #9
def study_multiband_planck(quick=True):
    savename = datadir+'cl_multiband.pkl'
    bands = [100, 143, 217, 'mb']
    if quick: cl = pickle.load(open(savename, 'rb'))  # pickle files need binary mode on Python 3
    else:
        cl = {}
        mask = load_planck_mask()
        mask_factor = np.mean(mask**2.)
        for band in bands:
            this_map = load_planck_data(band)
            this_cl = hp.anafast(this_map*mask, lmax=lmax)/mask_factor
            cl[band] = this_cl
        pickle.dump(cl, open(savename, 'wb'))


    cl_theory = {}
    pl.clf()
    
    for band in bands:
        l_theory, cl_theory[band] = get_cl_theory(band)
        this_cl = cl[band]
        pl.plot(this_cl/cl_theory[band])
        
    pl.legend(bands)
    pl.plot([0,4000],[1,1],'k--')
    pl.ylim(.7,1.3)
    pl.ylabel('data/theory')
Example #10
    def plot(self, title=None, **kwargs):
        """Generates a pylab plot from the result set.

        ``matplotlib`` must be installed, and in an
        IPython Notebook, inlining must be on::

            %%matplotlib inline

        The first and last columns are taken as the X and Y
        values.  Any columns between are ignored.

        Parameters
        ----------
        title: Plot title, defaults to names of Y value columns

        Any additional keyword arguments will be passed
        through to ``matplotlib.pylab.plot``.
        """
        import matplotlib.pylab as plt
        self.guess_plot_columns()
        self.x = self.x or range(len(self.ys[0]))
        coords = reduce(operator.add, [(self.x, y) for y in self.ys])
        plot = plt.plot(*coords, **kwargs)
        if hasattr(self.x, 'name'):
            plt.xlabel(self.x.name)
        ylabel = ", ".join(y.name for y in self.ys)
        plt.title(title or ylabel)
        plt.ylabel(ylabel)
        return plot
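# A hypothetical IPython usage sketch (the class also assumes module-level
# `import operator` and, on Python 3, `from functools import reduce`):
# result = %sql SELECT year, total_sales FROM sales GROUP BY year
# result.plot(title="Total sales by year")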
Example #11
    def bar(self, key_word_sep = " ", title=None, **kwargs):
        """Generates a pylab bar plot from the result set.

        ``matplotlib`` must be installed, and in an
        IPython Notebook, inlining must be on::

            %%matplotlib inline

        The last quantitative column is taken as the Y values;
        all other columns are combined to label the X axis.

        Parameters
        ----------
        title: Plot title, defaults to names of Y value columns
        key_word_sep: string used to separate column values
                      from each other in labels

        Any additional keyword arguments will be passed
        through to ``matplotlib.pylab.bar``.
        """
        import matplotlib.pylab as plt
        self.guess_pie_columns(xlabel_sep=key_word_sep)
        plot = plt.bar(range(len(self.ys[0])), self.ys[0], **kwargs)
        if self.xlabels:
            plt.xticks(range(len(self.xlabels)), self.xlabels,
                       rotation=45)
        plt.xlabel(self.xlabel)
        plt.ylabel(self.ys[0].name)
        return plot
Example #12
 def test_simple_gen(self):
     self_con = .8
     other_con = 0.05
     g = self.gen.gen_stoch_blockmodel(min_degree=1, blocks=5, self_con=self_con, other_con=other_con,
                                       powerlaw_exp=2.1, degree_seq='powerlaw', num_nodes=1000, num_links=3000)
     deg_hist = vertex_hist(g, 'total')
     res = fit_powerlaw.Fit(g.degree_property_map('total').a, discrete=True)
     print('powerlaw alpha:', res.power_law.alpha)
     print('powerlaw xmin:', res.power_law.xmin)
     if len(deg_hist[0]) != len(deg_hist[1]):
         deg_hist[1] = deg_hist[1][:len(deg_hist[0])]
     print('plot degree dist')
     plt.plot(deg_hist[1], deg_hist[0])
     plt.xscale('log')
     plt.xlabel('degree')
     plt.ylabel('#nodes')
     plt.yscale('log')
     plt.savefig('deg_dist_test.png')
     plt.close('all')
     print('plot graph')
     pos = sfdp_layout(g, groups=g.vp['com'], mu=3)
     graph_draw(g, pos=pos, output='graph.png', output_size=(800, 800),
                vertex_size=prop_to_size(g.degree_property_map('total'), mi=2, ma=30), vertex_color=[0., 0., 0., 1.],
                vertex_fill_color=g.vp['com'],
                bg_color=[1., 1., 1., 1.])
     plt.close('all')
     print('init:', self_con / (self_con + other_con), other_con / (self_con + other_con))
     print('real:', gt_tools.get_graph_com_connectivity(g, 'com'))
Example #13
    def check_models(self):
        '''
        Displays a plot of the models against that taken from a
        respected website (https://www.pvlighthouse.com.au/)
        '''
        plt.figure('Intrinsic bandgap')
        t = np.linspace(1, 500)

        for author in self.available_models():

            Eg = self.update(temp=t, author=author, multiplier=1.0)
            plt.plot(t, Eg, label=author)

        test_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'Si', 'check data', 'iBg.csv')

        data = np.genfromtxt(test_file, delimiter=',', names=True)

        for temp, name in zip(data.dtype.names[0::2], data.dtype.names[1::2]):
            plt.plot(
                data[temp], data[name], '--', label=name)

        plt.xlabel('Temperature (K)')
        plt.ylabel('Intrinsic Bandgap (eV)')

        plt.legend(loc=0)
        self.update(temp=0, author=author, multiplier=1.01)
Example #14
def plot_confusion_matrix(cm, title='', cmap=plt.cm.Blues):
    #print cm
    #display vehicle, idle, walking accuracy respectively
    #display overall accuracy
    print(type(cm))
   # plt.figure(index
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    #plt.figure("")
    plt.title("Confusion Matrix")
    plt.colorbar()
    tick_marks = [0,1,2]
    target_name = ["driving","idling","walking"]


    plt.xticks(tick_marks,target_name,rotation=45)

    plt.yticks(tick_marks,target_name,rotation=45)
    print(len(cm[0]))

    for i in range(0, 3):
        for j in range(0, 3):
            plt.text(j, i, str(cm[i, j]))  # x is the column (j), y is the row (i) under imshow
    plt.tight_layout()
    plt.ylabel("Actual Value")
    plt.xlabel("Predicted Outcome")
Example #15
 def plot_values(self, TITLE, SAVE):
     plot(self.list_of_densities, self.list_of_pressures)
     title(TITLE)
     xlabel("Densities")
     ylabel("Pressure")
     savefig(SAVE)
     show()
Example #16
   def __call__(self, **params):

       p = ParamOverrides(self, params)
       fig = plt.figure(figsize=(5, 5))

       # This one-liner works in Octave, but in matplotlib it
       # results in lines that are all connected across rows and columns,
       # so here we plot each line separately:
       #   plt.plot(x,y,"k-",transpose(x),transpose(y),"k-")
       # Here, the "k-" means plot in black using solid lines;
       # see matplotlib for more info.
       # Temporarily make non-interactive for plotting
       isint = plt.isinteractive()
       plt.ioff()
       for r, c in zip(p.y[::p.skip], p.x[::p.skip]):
           plt.plot(c, r, "k-")
       for r, c in zip(np.transpose(p.y)[::p.skip],np.transpose(p.x)[::p.skip]):
           plt.plot(c, r, "k-")

       # Plot the last row and column explicitly to avoid leaving cells open
       if p.skip != 1:
           plt.plot(p.x[-1], p.y[-1], "k-")
           plt.plot(np.transpose(p.x)[-1], np.transpose(p.y)[-1], "k-")

       plt.xlabel('x')
       plt.ylabel('y')
       # Currently sets the input range arbitrarily; should presumably figure out
       # what the actual possible range is for this simulation (which would presumably
       # be the maximum size of any GeneratorSheet?).
       plt.axis(p.axis)

       if isint: plt.ion()
       self._generate_figure(p)
       return fig
Example #17
def inter_show(start, lc, eta, vol_ins, props, lbl_outs, grdts, pars):
    '''
    Plots a display of training information to the screen
    '''
    import matplotlib.pylab as plt
    name_in, vol  = vol_ins.popitem()
    name_p,  prop = props.popitem()
    name_l,  lbl  = lbl_outs.popitem()
    name_g,  grdt = grdts.popitem()

    m_input = volume_util.crop(vol[0,:,:,:], prop.shape[-3:]) #good enough for now

    # real time visualization
    plt.subplot(251),   plt.imshow(vol[0,0,:,:],    interpolation='nearest', cmap='gray')
    plt.xlabel('input')
    plt.subplot(252),   plt.imshow(m_input[0,:,:],    interpolation='nearest', cmap='gray')
    plt.xlabel('matched input')
    plt.subplot(253),   plt.imshow(prop[0,0,:,:],   interpolation='nearest', cmap='gray')
    plt.xlabel('output')
    plt.subplot(254),   plt.imshow(lbl[0,0,:,:],    interpolation='nearest', cmap='gray')
    plt.xlabel('label')
    plt.subplot(255),   plt.imshow(grdt[0,0,:,:],   interpolation='nearest', cmap='gray')
    plt.xlabel('gradient')

    plt.subplot(256)
    plt.plot(lc.tn_it, lc.tn_err, 'b', label='train')
    plt.plot(lc.tt_it, lc.tt_err, 'r', label='test')
    plt.xlabel('iteration'), plt.ylabel('cost energy')
    plt.subplot(257)
    plt.plot( lc.tn_it, lc.tn_cls, 'b', lc.tt_it, lc.tt_cls, 'r')
    plt.xlabel('iteration'), plt.ylabel( 'classification error' )
    return
Example #18
def plot_feat_hist(data_name_list, filename=None):
    pylab.clf()
    # import pdb;pdb.set_trace()
    num_rows = 1 + (len(data_name_list) - 1) // 2  # integer division for Python 3
    num_cols = 1 if len(data_name_list) == 1 else 2
    pylab.figure(figsize=(5 * num_cols, 4 * num_rows))

    for i in range(num_rows):
        for j in range(num_cols):
            pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
            x, name = data_name_list[i * num_cols + j]
            pylab.title(name)
            pylab.xlabel('Value')
            pylab.ylabel('Density')
            # the histogram of the data
            max_val = np.max(x)
            if max_val <= 1.0:
                bins = 50
            elif max_val > 50:
                bins = 50
            else:
                bins = max_val
            n, bins, patches = pylab.hist(
                x, bins=bins, density=True,  # 'normed' was removed from matplotlib
                facecolor='green', alpha=0.75)

            pylab.grid(True)

    if not filename:
        filename = "feat_hist_%s.png" % name

    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
Example #19
def cdf(x,colsym="",lab="",lw=4):
    """ plot the cumulative density function

    Parameters
    ----------

    x : np.array()
    colsym : string
    lab : string
    lw : int
        linewidth

    Examples
    --------

    >>> import numpy as np

    """
    rcParams['legend.fontsize']=20
    rcParams['font.size']=20

    x  = np.sort(x)
    n  = len(x)
    x2 = np.repeat(x, 2)
    y2 = np.hstack([0.0, np.repeat(np.arange(1,n) / float(n), 2), 1.0])
    plt.plot(x2,y2,colsym,label=lab,linewidth=lw)
    plt.grid(True)
    plt.legend(loc=2)
    plt.xlabel('Ranging Error[m]')
    plt.ylabel('Cumulative Probability')
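# A hypothetical call (assumes `import numpy as np`, `import matplotlib.pylab as plt`,
# and `from matplotlib import rcParams` at module level):
# errors = np.abs(np.random.randn(500))   # fake ranging errors in metres
# cdf(errors, 'b-', lab='simulated')
# plt.show()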
Example #20
def plot_peaks(spectra, ref, zl, pl, filename=''):
  plt.figure()
  plt.title("Debug: Peak fitting for '%s'" % filename)
  plt.xlabel("y-position [px]")
  plt.ylabel("Intensity")

  Nspectra, Npx = spectra.shape

  for s in range(Nspectra):  # xrange does not exist on Python 3
    scale = 1. / spectra.max()
    offset = -s * 0.1

    # plot data
    plt.plot(spectra[s] * scale + offset, 'k', linewidth=2)

    # plot first peak
    p, A, w = zl[s]
    x = np.arange(-2 * w, 2 * w) + p
    plt.plot(x, gauss(x, *zl[s]) * scale + offset, 'r')

    # plot second peak
    if ref is not None:
      p, A = pl[s]
      x = np.arange(len(ref)) - len(ref) // 2 + p  # integer division for Python 3
      plt.plot(x, ref / ref.max() * A * scale + offset, 'g')
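# plot_peaks() calls gauss() without defining it; a plausible sketch, assuming the
# peak parameters in zl are (position p, amplitude A, width w):
def gauss(x, p, A, w):
  return A * np.exp(-(x - p)**2 / (2. * w**2))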
Example #21
def fancy_dendrogram(*args, **kwargs):
    '''
    Source: https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/
    '''
    from scipy.cluster import hierarchy
    import matplotlib.pylab as plt
    
    max_d = kwargs.pop('max_d', None)
    if max_d and 'color_threshold' not in kwargs:
        kwargs['color_threshold'] = max_d
    annotate_above = kwargs.pop('annotate_above', 0)

    ddata = hierarchy.dendrogram(*args, **kwargs)

    if not kwargs.get('no_plot', False):
        plt.title('Hierarchical Clustering Dendrogram (truncated)')
        plt.xlabel('sample index or (cluster size)')
        plt.ylabel('distance')
        for i, d, c in zip(ddata['icoord'], ddata['dcoord'], ddata['color_list']):
            x = 0.5 * sum(i[1:3])
            y = d[1]
            if y > annotate_above:
                plt.plot(x, y, 'o', c=c)
                plt.annotate("%.3g" % y, (x, y), xytext=(0, -5),
                             textcoords='offset points',
                             va='top', ha='center')
        if max_d:
            plt.axhline(y=max_d, c='k')
    return ddata
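# A minimal usage sketch, assuming numpy and scipy are installed (the threshold
# values here are arbitrary illustrations):
import numpy as np
import matplotlib.pylab as plt
from scipy.cluster.hierarchy import linkage

Z = linkage(np.random.rand(50, 2), method='ward')  # toy data, Ward linkage
fancy_dendrogram(Z, truncate_mode='lastp', p=12, annotate_above=0.2, max_d=0.7)
plt.show()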
Example #22
def modelfit(alg, train, target, test, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
    
    if useTrainCV:
        xgboost_params = alg.get_xgb_params()
        xgtrain = xgb.DMatrix(train.values, label=target.values)
        xgtest = xgb.DMatrix(test.values)
        watchlist = [(xgtrain, 'train')] # Specify validations set to watch performance
        cvresult = xgb.cv(xgboost_params, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds, early_stopping_rounds=early_stopping_rounds) #metrics='auc',show_progress=False
        alg.set_params(n_estimators=cvresult.shape[0])
    
    # Fit the algorithm on the data
    alg.fit(train, target, eval_metric='auc')

    # Predict training set:
    train_preds = alg.predict(train)
    train_predprob = alg.predict_proba(train)[:,1]
    
    # Print model report:
    print "\nModel Report"
    print "Accuracy : %.4g" % metrics.accuracy_score(target.values, train_preds)
    print "AUC Score (Train): %f" % metrics.roc_auc_score(target, train_predprob)

    # Make a prediction:
    print('Predicting......')
    test_predprob = alg.predict_proba(test)[:,1]

    feat_imp = pd.Series(alg.get_booster().get_fscore()).sort_values(ascending=False)  # recent xgboost renamed booster() to get_booster()
    feat_imp.plot(kind='bar', title='Feature Importances')
    plt.ylabel('Feature Importance Score')
    #plt.show()
    return test_predprob
Example #23
def fdr(p_values=None, verbose=0):
    """Returns the FDR associated with each p value

    Parameters
    -----------
    p_values : ndarray of shape (n)
        The samples p-value

    Returns
    -------
    q : array of shape(n)
        The corresponding fdr values
    """
    p_values = check_p_values(p_values)
    n_samples = p_values.size
    order = p_values.argsort()
    sp_values = p_values[order]

    # compute q while in ascending order
    q = np.minimum(1, n_samples * sp_values / np.arange(1, n_samples + 1))
    for i in range(n_samples - 1, 0, - 1):
        q[i - 1] = min(q[i], q[i - 1])

    # reorder the results
    inverse_order = np.arange(n_samples)
    inverse_order[order] = np.arange(n_samples)
    q = q[inverse_order]

    if verbose:
        import matplotlib.pylab as mp
        mp.figure()
        mp.xlabel('Input p-value')
        mp.plot(p_values, q, '.')
        mp.ylabel('Associated fdr')
    return q
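# A hypothetical call (fdr() relies on the module's check_p_values() helper):
# q = fdr(np.random.uniform(size=1000), verbose=1)   # Benjamini-Hochberg q-values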
Example #24
def plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):
    """
    Plot a radially symmetric Q model.

    plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):

    r_min=minimum radius [km], r_max=maximum radius [km], dr=radius
    increment [km]

    Currently available models (model): cem, prem, ql6
    """
    import matplotlib.pylab as plt

    r = np.arange(r_min, r_max + dr, dr)
    q = np.zeros(len(r))

    for k in range(len(r)):

        if model == 'cem':
            q[k] = q_cem(r[k])
        elif model == 'ql6':
            q[k] = q_ql6(r[k])
        elif model == 'prem':
            q[k] = q_prem(r[k])

    plt.plot(r, q, 'k')
    plt.xlim((0.0, r_max))
    plt.xlabel('radius [km]')
    plt.ylabel('Q')
    plt.show()
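# A hypothetical call, assuming the q_cem/q_ql6/q_prem model functions are in scope:
# plot_q(model='prem', r_min=0.0, r_max=6371.0, dr=10.0)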
Example #25
    def handle(self, *args, **options):
        try:
            from matplotlib import pylab as pl
            import numpy as np
        except ImportError:
            raise Exception('Be sure to install requirements_scipy.txt before running this.')

        all_names_and_counts = RawCommitteeTransactions.objects.all().values('attest_by_name').annotate(total=Count('attest_by_name')).order_by('-total')
        all_names_and_counts_as_tuple_and_sorted = sorted([(row['attest_by_name'], row['total']) for row in all_names_and_counts], key=lambda row: row[1])
        print "top ten attestors:  (name, number of transactions they attest for)"
        for row in all_names_and_counts_as_tuple_and_sorted[-10:]:
            print row

        n_bins = 100
        filename = 'attestor_participation_distribution.png'

        x_max = all_names_and_counts_as_tuple_and_sorted[-31][1]  # eliminate top outliers from hist
        x_min = all_names_and_counts_as_tuple_and_sorted[0][1]

        counts = [row['total'] for row in all_names_and_counts]
        pl.figure(1, figsize=(18, 6))
        pl.hist(counts, bins=np.arange(x_min, x_max, (float(x_max) - x_min) / n_bins))  # use the n_bins defined above
        pl.title('Histogram of Attestor Participation in RawCommitteeTransactions')
        pl.xlabel('Number of transactions a person attested for')
        pl.ylabel('Number of people')
        pl.savefig(filename)
Example #26
def plot_runtime_results(results):
    plt.rcParams["figure.figsize"] = (7, 7)
    plt.rcParams["font.size"] = 22
    matplotlib.rc("xtick", labelsize=24)
    matplotlib.rc("ytick", labelsize=24)

    params = {"font.size": 32,  # "text.fontsize" is no longer a valid rcParams key
              "legend.fontsize": 30,
              "axes.labelsize": 32,
              "text.usetex": False
              }
    plt.rcParams.update(params)
    
    #plt.semilogx(results[:,0], results[:,3], 'r-x', lw=3)
    #plt.semilogx(results[:,0], results[:,1], 'g-D', lw=3)
    #plt.semilogx(results[:,0], results[:,2], 'b-s', lw=3)

    plt.plot(results[:,0], results[:,3], 'r-x', lw=3, ms=10)
    plt.plot(results[:,0], results[:,1], 'g-D', lw=3, ms=10)
    plt.plot(results[:,0], results[:,2], 'b-s', lw=3, ms=10)

    plt.legend(["Chain", "Tree", "FFT Tree"], loc="upper left")
    plt.xticks([1e5, 2e5, 3e5])
    plt.yticks([0, 60, 120, 180])

    plt.xlabel("Problem Size")
    plt.ylabel("Runtime (sec)")
    return results
Example #27
def flipPlot(minExp, maxExp):
    """假定minEXPy和maxExp是正整数且minExp<maxExp
    绘制出2**minExp到2**maxExp次抛硬币的结果
    """
    ratios = []
    diffs = []
    aAxis = []
    for i in range(minExp, maxExp+1):
        aAxis.append(2**i)
    for numFlips in aAxis:
        numHeads = 0
        for n in range(numFlips):
            if random.random() < 0.5:
                numHeads += 1
        numTails = numFlips - numHeads
        ratios.append(numHeads/numFlips)
        diffs.append(abs(numHeads-numTails))
    plt.figure()
    ax1 = plt.subplot(121)
    plt.title("Difference Between Heads and Tails")
    plt.xlabel('Number of Flips')
    plt.ylabel('Abs(#Heads - #Tails)')
    ax1.semilogx(aAxis, diffs, 'bo')
    ax2 = plt.subplot(122)
    plt.title("Heads/Tails Ratios")
    plt.xlabel('Number of Flips')
    plt.ylabel("#Heads/#Tails")
    ax2.semilogx(aAxis, ratios, 'bo')
    plt.show()
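# A hypothetical call (the snippet assumes `import random` and
# `import matplotlib.pylab as plt`):
# random.seed(0)
# flipPlot(4, 20)   # trials from 2**4 up to 2**20 flips each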
Example #28
    def plot_corner_posteriors(self, savefile=None, labels=["T1", "R1", "Av", "T2", "R2"]):
        '''
        Plots the corner plot of the MCMC results.
        '''
        ndim = len(self.sampler.flatchain[0,:])
        chain = self.sampler
        samples = chain.flatchain
        
        samples = samples[:,0:ndim]  
        plt.figure(figsize=(8,8))
        fig = corner.corner(samples, labels=labels[0:ndim])
        plt.title("MJD: %.2f"%self.mjd)
        name = self._get_save_path(savefile, "mcmc_posteriors")
        plt.savefig(name)
        plt.close("all")
        

        plt.figure(figsize=(8,ndim*3))
        for n in range(ndim):
            plt.subplot(ndim,1,n+1)
            chain = self.sampler.chain[:,:,n]
            nwalk, nit = chain.shape
            
            for i in np.arange(nwalk):
                plt.plot(chain[i], lw=0.1)
                plt.ylabel(labels[n])
                plt.xlabel("Iteration")
        name_walkers = self._get_save_path(savefile, "mcmc_walkers")
        plt.tight_layout()
        plt.savefig(name_walkers)
        plt.close("all")  
Example #29
def plotMassFunction(im, pm, outbase, mmin=9, mmax=13, mstep=0.05):
    """
    Make a comparison plot between the input mass function and the 
    predicted projected correlation function
    """
    plt.clf()

    nmbins = int(round((mmax - mmin) / mstep))  # np.logspace expects an integer count
    mbins = np.logspace(mmin, mmax, nmbins)
    mcen = (mbins[:-1] + mbins[1:]) / 2

    plt.xscale('log', nonpositive='clip')  # 'nonposx'/'nonposy' were renamed in matplotlib 3.3
    plt.yscale('log', nonpositive='clip')

    ic, e, p = plt.hist(im, mbins, label='Original Halos', alpha=0.5, density=True)
    pc, e, p = plt.hist(pm, mbins, label='Added Halos', alpha=0.5, density=True)
    
    plt.legend()
    plt.xlabel( r'$M_{vir}$' )
    plt.ylabel( r'$\frac{dN}{dM}$' )
    #plt.tight_layout()
    plt.savefig( outbase+'_mfcn.png' )
    
    mdtype = np.dtype( [ ('mcen', float), ('imcounts', float), ('pmcounts', float) ] )
    mf = np.ndarray( len(mcen), dtype = mdtype )
    mf[ 'mcen' ] = mcen
    mf[ 'imcounts' ] = ic
    mf[ 'pmcounts' ] = pc

    fitsio.write( outbase+'_mfcn.fit', mf )
Example #30
	def plot_cell(self,cell_number=0,label='insert_label'):

		current_cell = self.cell_list[cell_number]
		temp = current_cell.temp
		cd_signal = current_cell.cd_signal
		cd_calc = current_cell.cd_calc()
		
		ax = pylab.gca()

		pylab.plot(temp,cd_signal,'o',color='black')
		pylab.plot(temp,cd_calc,color='black')
		pylab.xlabel(r'Temperature ($^{\circ}$C)')
		pylab.ylabel('mdeg')
		pylab.ylim([-25,-4])
		dH = numpy.round(current_cell.dH, decimals=1)
		Tm = numpy.round(current_cell.Tm-273.15, decimals=1)
		nf = current_cell.nf
		nu = current_cell.nu
		textstr_dH = r'${\Delta}H_{m}$ = %.1f kcal/mol' % dH
		textstr_Tm = r'$T_{m}$ = %.1f $^{\circ}$C' % Tm
		textstr_nf ='$N_{folded}$ = %d' %nf
		textstr_nu ='$N_{unfolded}$ = %d'%nu
		ax.text(8,-6,textstr_dH, fontsize=16,ha='left',va='top')
		ax.text(8,-7.5,textstr_Tm, fontsize=16,ha='left',va='top')
		ax.text(8,-9,textstr_nf, fontsize=16,ha='left',va='top')
		ax.text(8,-10.5,textstr_nu, fontsize=16,ha='left',va='top')
		pylab.title(label)		
		pylab.show()

		return
Example #31
#%%
# The fourth subplot, which shows the optimization of Cs
def make_patch_spines_invisible(ax):
    ax.set_frame_on(True)
    ax.patch.set_visible(False)
    for sp in ax.spines.values():
        sp.set_visible(False)


dsWD = [4, 7, 4, 7]  # the line dash of the first configuration

ax = plt.subplot(grid[1, 13:])  #plt.subplot(2,4,7)
plt.title('(d)', fontsize=fs)

plt.xlabel('$c_s$                   ', fontsize=fs)
plt.ylabel('Mean $W_d$ (10$^{5}$ km)', fontsize=fs, color=color1)

# the wasserstein axis
#ax.set_yticklabels(labels=[-2,-1,0,1,2,3,4,5],fontsize=fs, color='k')

#a0 = sns.lineplot(x=CS,y=lsq/10**5, linewidth = lw, ax=ax, color=color1,
#             zorder=9)

a0 = sns.lineplot(x=CS,
                  y=np.full(len(lsq), mean_distance_mm / 10**5),
                  linewidth=lw,
                  ax=ax,
                  color=color1,
                  zorder=10)
a0.lines[0].set_linestyle(":")
sns.lineplot(x=CS,
Example #32
    def train(self,
              X_train,
              Y_train,
              X_test,
              Y_test,
              print_accuracy=False,
              show_loss_graph=False,
              show_accuracy_graph=False):
        x_train, y_train = self.normalize_vec(X_train, Y_train)
        x_test, y_test = self.normalize_vec(X_test, Y_test)
        train_loss = []
        train_acc_history = []
        test_acc_history = []
        params = {}

        for i in range(self.epoch_num):
            loss_per_epoch = 0
            accuracy_per_epoch = 0
            minibatches = shuffle_batches(x_train, y_train, self.batch_size)
            for j in range(len(minibatches)):
                x_batch = minibatches[j][0]
                y_batch = minibatches[j][1]

                grads = self.gradient(x_batch, y_batch)
                params = {
                    'W1': self.W1,
                    'b1': self.b1,
                    'W2': self.W2,
                    'b2': self.b2,
                    'gamma': self.gamma,
                    'beta': self.beta
                }
                self.optimizer.update(params, grads)

                if self.use_bn:
                    self.running_mean = self.layers['BatchNorm'].running_mean
                    self.running_var = self.layers['BatchNorm'].running_var

                loss = self.loss(x_batch, y_batch)
                loss_per_epoch += loss

            ave_loss = loss_per_epoch / len(minibatches)
            train_loss.append(ave_loss)

            if print_accuracy:
                train_accuracy = self.accuracy(x_train, y_train)
                test_accuracy = self.accuracy(x_test, y_test)
                train_acc_history.append(train_accuracy / X_train.shape[0])
                test_acc_history.append(test_accuracy / X_test.shape[0])

                print("Epoch {i}: Loss={ave_loss}  Accuracy_train={train_acc:.4f}  Accuracy_test={test_acc:.4f}"\
                    .format(i=i,ave_loss=ave_loss,train_acc=train_accuracy/X_train.shape[0],test_acc=test_accuracy/X_test.shape[0]))

            else:
                print("Epoch {i}: Loss={ave_loss}".format(i))
        print("Final test_accuracy: {acc}".format(
            acc=self.accuracy(x_test, y_test) / X_test.shape[0]))

        path = os.path.dirname(os.path.abspath(__file__))
        if show_loss_graph:
            x = np.linspace(0, self.epoch_num, self.epoch_num)
            plt.plot(x, train_loss, label='loss')
            plt.title('Average Loss of Each Epoch--{activation}'.format(
                activation=self.activation))
            plt.xlabel('epoch')
            plt.ylabel('loss')
            plt.xlim(left=0)
            plt.ylim(bottom=0)
            # plt.savefig(os.path.join(path,'fig_loss_{act_f}_{mid_node_num}n_{epoch_num}e.png'\
            #     .format(act_f=self.activation,mid_node_num=self.sizes[1], epoch_num=self.epoch_num)))
            plt.show()
        if show_accuracy_graph:
            x2 = np.linspace(0, len(test_acc_history), len(test_acc_history))
            plt.plot(x2, train_acc_history, label='train accuracy')
            plt.plot(x2,
                     test_acc_history,
                     label='test accuracy',
                     linestyle='--')
            plt.title('Accuracy After Each Epoch--{activation}'.format(
                activation=self.activation))
            plt.xlabel('epoch')
            plt.ylabel('accuracy')
            plt.xlim(left=0)
            plt.ylim(0, 1.0)
            plt.legend(loc='lower right')
            # plt.savefig(os.path.join(path,'fig_acc_{act_f}_{mid_node_num}n_{epoch_num}e.png'\
            #     .format(act_f=self.activation,mid_node_num=self.sizes[1], epoch_num=self.epoch_num)))
            plt.show()
        if self.use_bn:
            params['W1'], params['b1'], params['W2'], params['b2'] ,params['gamma'], params['beta'], params['running_mean'], params['running_var']= \
            self.W1, self.b1, self.W2, self.b2, self.gamma, self.beta, self.running_mean,self.running_var
        else:
            params['W1'], params['b1'], params['W2'], params['b2'] ,params['gamma'], params['beta']= \
            self.W1, self.b1, self.W2, self.b2, self.gamma, self.beta
        print("Network Architecture:{a_f}-Dropout({use_dropout}({drop_p}))-BatchNorm({use_bn})"\
            .format(a_f=self.activation,use_dropout=self.use_dropout,drop_p=self.dropout_p, use_bn=self.use_bn))
        print("Training, Done!")

        return params
Example #33
    return a * b


h = np.linspace(0.0001, 1 - 1e-5, 100)
y = prob(h, cadena)
m = np.trapz(y, x=h)
y_new = y / m

# Key values (posterior mode and its location)
a = np.where(y_new == np.max(y_new))
H_0 = h[a[0][0]]

log_y = np.log(y_new)
valor = log_y[a[0][0] + 1] - 2 * log_y[a[0][0]] + log_y[a[0][0] - 1]
deriv2 = valor / (H_0 - h[a[0][0] - 1])**2
sigma = 1 / np.sqrt(-deriv2)
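# This is a Laplace approximation: the curvature of the log-posterior at its mode
# H_0 sets the width sigma of the Gaussian computed in aprox() below.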


def aprox(h):
    A = 1.0 / (sigma * (np.sqrt(2 * np.pi)))
    B = np.exp(-(h - H_0)**2 / (2 * (sigma**2)))
    return A * B


plt.plot(h, aprox(h), '--')
plt.plot(h, y_new)
plt.xlabel('H')
plt.ylabel('P(H|data)')
plt.title('H = {:.2f} {} {:.2f}'.format(H_0, '$\pm$', sigma))
plt.savefig('coins.png')
Example #34
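# This snippet is truncated: it assumes df_norm (a scaled DataFrame), TWSS = [],
# and a cluster range such as k = list(range(2, 15)) are defined above, plus
# `from sklearn.cluster import KMeans`, `from scipy.spatial.distance import cdist`,
# `import pandas as pd`, and `import matplotlib.pylab as plt`.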
for i in k:
    kmeans = KMeans(n_clusters=i)
    kmeans.fit(df_norm)
    WSS = []  # variable for storing within sum of squares for each cluster
    for j in range(i):
        WSS.append(
            sum(
                cdist(df_norm.iloc[kmeans.labels_ == j, :],
                      kmeans.cluster_centers_[j].reshape(1, df_norm.shape[1]),
                      "euclidean")))
    TWSS.append(sum(WSS))

# Scree plot
plt.plot(k, TWSS, 'ro-')
plt.xlabel("No_of_Clusters")
plt.ylabel("total_within_SS")
plt.xticks(k)

# Selecting 5 clusters from the above scree plot which is the optimum number of clusters
model = KMeans(n_clusters=5)
model.fit(df_norm)

model.labels_  # getting the labels of clusters assigned to each row
md = pd.Series(model.labels_)  # convert the numpy array into a pandas Series
Univ['clust'] = md  # add the cluster labels as a new column
df_norm.head()

Univ = Univ.iloc[:, [7, 0, 1, 2, 3, 4, 5, 6]]

Univ.iloc[:, 1:7].groupby(Univ.clust).mean()
Example #35
                   0,
                   self.window + 2,
                   color="k",
                   linestyles="dashed")
        plt.xlabel("Lag")
        plt.ylabel("Autocorrelation")
        plt.title("Correlogram for Forecast Error")
        plt.show()


hw = HoltWinters("demand.csv", window=12)
hw.fit_holt_winters()

forecast_one_step, forecast_error = hw.holt_winters_best_fit()
future_mean, future_lb, future_ub = hw.predict(forecast_n_steps=12, n_sims=500)

# Plot correlogram
hw.plot_correlogram(forecast_error)

# Plot forecast
plt.plot(list(hw.ts.keys()), list(hw.ts.values()), "-k", label="raw")
plt.plot(list(hw.ts.keys()), forecast_one_step, "--k", label="holt-winters")

x_future_min = max(hw.ts.keys()) + 1
x_future = range(x_future_min, x_future_min + len(future_mean))
plt.plot(x_future, future_mean, color='green', label="95% CI")
plt.fill_between(x_future, future_ub, future_lb, color="green", alpha=0.3)
plt.legend(loc="best")
plt.xlabel("Date Index")
plt.ylabel("Metric")
plt.show()
Example #36
        trainData = pca.transform(trainData)
        testData = pca.transform(testData)

        totalData = numpy.concatenate((trainData, testData), axis=0)
        totalData = scale(totalData)
        trainData = totalData[0:len(trainData)]
        testData = totalData[len(trainData):]
        # print(trainData[0:5])

        clf = SVC(probability=True)
        clf.fit(trainData, trainLabel)
        # joblib.dump(clf, savepath + 'Parameter.m')
        probability = clf.predict_proba(testData)
        fpr, tpr, thresholds = roc_curve(testLabel, probability[:, 1])
        mean_tpr += interp(mean_fpr, fpr,
                           tpr)  # interpolate tpr onto the common mean_fpr grid with scipy's interp()
        mean_tpr[0] = 0.0  # force the mean curve to start at the origin
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, lw=1, label='ROC fold %d' % appoint)
        aucList.append(roc_auc)

    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(used)
    plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
    plt.legend()
    plt.show()

    for sample in aucList:
        print(sample)
Example #37
         #    plt.xlim([xmin,xmax])
         #    plt.ylim([xmin,xmax])
         #    plt.xticks([])
         #    plt.yticks([])
         #plt.text(-60,20,'Disc loss: %f, NELBO: %f, KL0: %f, KL1: %f, KL2: %f, KL3: %f, KL4: %f' % (dl, NELBO, KL0, KL1, KL2, KL3, KL4))
         #plt.text(-28,-7,'KLAVG: %f' %(np.mean([KL0,KL1,KL2,KL3,KL4])))
         #plt.savefig('FiguresPCADV\Fig %i'%(j))
         #plt.close()
 np.savetxt("PCADV truklavg.csv", KLAVGBRUH, delimiter=",")
 np.savetxt("PCADV estimatorloss.csv", ISTHISLOSS, delimiter=",")
 np.savetxt("PCADV nelbo.csv", NELBRUH, delimiter=",")
 np.savetxt("PCADV converge.csv", converge)
 plt.plot(KLAVGBRUH)
 plt.axvline(x=(converge[0] / 100))
 plt.xlabel('Iterations (x100)')
 plt.ylabel('Avg KL Div')
 plt.savefig("PCADV truklavg")
 plt.close()
 plt.plot(ISTHISLOSS)
 plt.axvline(x=converge[0])
 plt.xlabel('Iterations')
 plt.ylabel('Estimator Loss')
 plt.savefig("PCADV estimatorloss")
 plt.close()
 plt.plot(NELBRUH)
 plt.axvline(x=converge[0])
 plt.xlabel('Iterations')
 plt.ylabel('NELBO')
 plt.savefig('PCADV nelbo')
 plt.close()
 #nice plot
Example #38
def main():

    #Load Data
    dfGlucose = pd.read_csv(os.path.join('blood-glucose-data.csv'),
                            sep=',',
                            index_col='point_timestamp',
                            error_bad_lines=True)
    dfActivity = pd.read_csv(os.path.join('distance-activity-data.csv'),
                             sep=',',
                             index_col='point_timestamp',
                             error_bad_lines=True)
    dfHeart = pd.read_csv(os.path.join('heart-rate-data.csv'),
                          sep=',',
                          index_col='point_timestamp',
                          error_bad_lines=True)

    #Convert point_value to numeric
    dfGlucose['point_value(mg/dL)'] = dfGlucose['point_value(mg/dL)'].astype(
        dtype=float)
    dfActivity['point_value(kilometers)'] = dfActivity[
        'point_value(kilometers)'].astype(dtype=float)
    dfHeart['point_value'] = dfHeart['point_value'].astype(dtype=float)

    #Convert index to time/data
    dfGlucose.index = pd.to_datetime(dfGlucose.index)
    dfActivity.index = pd.to_datetime(dfActivity.index)
    dfHeart.index = pd.to_datetime(dfHeart.index)

    #Aggregate data of Activity and Heart
    #Activity
    Activity = aggregate(dfGlucose.index, dfActivity,
                         'point_value(kilometers)', 'sum')
    #Heart
    Heart = aggregate(dfGlucose.index, dfHeart, 'point_value', 'mean')

    #Plot and save the glucose values to explore Data
    plt.figure()
    plt.plot(dfGlucose.index,
             dfGlucose['point_value(mg/dL)'].values,
             label='Glucose')
    pylab.xlabel('Timestamp')
    pylab.ylabel('Glucose (mg/dL)')
    plt.legend(loc=0)
    plt.grid(True)
    plt.savefig("Glucose_point_value(mg-dL).png")
    plt.close()

    #Plot and save the Activity values to explore Data
    plt.figure()
    plt.plot(dfGlucose.index, Activity, label='Aggregated Activity')
    pylab.xlabel('Timestamp')
    pylab.ylabel('Agg Activity')
    plt.legend(loc=0)
    plt.grid(True)
    plt.savefig("Activity_aggregated.png")
    plt.close()

    #Plot and save the Heart values to explore Data
    plt.figure()
    plt.plot(dfGlucose.index, Heart, label='Aggregated Heart')
    pylab.xlabel('Timestamp')
    pylab.ylabel('Agg Heart')
    plt.legend(loc=0)
    plt.grid(True)
    plt.savefig("Heart_aggregated.png")
    plt.close()

    #Separating instances of forecasting (discarding weeks with missing data)
    '''To generate the whole set of slided data, uncomment line 130 and comment line 131.
    To save your time, the current configuration only generates data for the
    best-performing window sizes of each technique. '''

    #for window_size in range(48,150,4):
    for window_size in range(52, 69, 4):
        horizon = 12
        #Transforming the data in instances of regression (univariate)
        print('Sliding window_size: ' + str(window_size) + ' for glucose_only')
        PipelineWindow(dfGlucose['point_value(mg/dL)'], [], window_size,
                       horizon, 'glucose_only')
        print('Sliding window_size: ' + str(window_size) +
              ' for glucose_activity')
        PipelineWindow(dfGlucose['point_value(mg/dL)'], [Activity],
                       window_size, horizon, 'glucose_activity')
        print('Sliding window_size: ' + str(window_size) +
              ' for glucose_activity_heart')
        PipelineWindow(dfGlucose['point_value(mg/dL)'], [Activity, Heart],
                       window_size, horizon, 'glucose_activity_heart')
Example #39
def func(comp, param, trans_costs, critical_level, error_level):

    dfa = df0[0:comp + param]
    dfb = df00[0:comp + param]

    #NOTE CRITICAL LEVEL HAS BEEN SET TO 5% FOR COINTEGRATION TEST

    def find_cointegrated_pairs(dataframe, critical_level=0.05):
        n = dataframe.shape[1]  # the number of columns in the dataframe
        pvalue_matrix = np.ones((n, n))  # initialize the matrix of p-values
        keys = dataframe.columns  # get the column names
        pairs = []  # initialize the list of cointegrated pairs
        for i in range(n):
            for j in range(i + 1, n):  # for j bigger than i
                stock1 = dataframe[keys[i]]  # obtain the price of "stock1"
                stock2 = dataframe[keys[j]]  # obtain the price of "stock2"
                result = sm.tsa.stattools.coint(
                    stock1, stock2)  # run the Engle-Granger cointegration test
                pvalue = result[1]  # get the p-value
                pvalue_matrix[i, j] = pvalue
                if pvalue < critical_level:  # if p-value less than the critical level
                    pairs.append(
                        (keys[i], keys[j],
                         pvalue))  # record the contract with that p-value
        return pvalue_matrix, pairs

    #set up the split point for our "training data" on which to perform the co-integration test (the remaining data will be fed to our backtest function)
    split = param

    #run our dataframe (up to the split point) of ticker price data through our co-integration function and store results
    pvalue_matrix, pairs = find_cointegrated_pairs(dfa[:-split],
                                                   critical_level)

    def half_life(spread):
        spread_lag = spread.shift(1)
        spread_lag.iloc[0] = spread_lag.iloc[1]
        spread_ret = spread - spread_lag
        spread_ret.iloc[0] = spread_ret.iloc[1]
        spread_lag2 = sm.add_constant(spread_lag)
        model = sm.OLS(spread_ret, spread_lag2)
        res = model.fit()
        halflife = int(round(-np.log(2) / res.params[1], 0))

        if halflife <= 0:
            halflife = 1
        return halflife

    def backtest(dfa, dfb, param, s1, s2, error_level, trans_costs=False):
        #############################################################
        # INPUT:
        # DataFrame of prices
        # s1: the symbol of contract one
        # s2: the symbol of contract two
        # x: the price series of contract one
        # y: the price series of contract two
        # OUTPUT:
        # df1['cum rets']: cumulative returns in pandas data frame
        # sharpe: Sharpe ratio
        # CAGR: Compound Annual Growth Rate

        #Kalman filter params
        delta = 1e-4
        wt = delta / (1 - delta) * np.eye(2)
        vt = 1e-3
        theta = np.zeros(2)
        C = np.zeros((2, 2))
        R = None

        # n = 4
        # s1 = pairs[n][0]
        # s2 = pairs[n][1]

        x = list(dfa[s1])
        y = list(dfa[s2])
        ts_x = list(dfb[s1])
        ts_y = list(dfb[s2])

        #X = sm.add_constant(x)

        # hrs = [0]*len(df)
        Fs = [0] * len(dfa)
        Rs = [0] * len(dfa)
        ets = [0] * len(dfa)
        Qts = [0] * len(dfa)
        sqrt_Qts = [0] * len(dfa)
        thetas = [0] * len(dfa)
        Ats = [0] * len(dfa)
        Cs = [0] * len(dfa)

        for i in range(len(dfa) - param - 10, len(dfa)):
            # res = sm.OLS(y[i-split+1:i+1],X[i-split+1:i+1]).fit()
            # hr[i] = res.params[1]

            F = np.asarray([x[i], 1.0]).reshape((1, 2))
            Fs[i] = F
            if R is not None:
                R = C + wt
            else:
                R = np.zeros((2, 2))
            Rs[i] = R

            # Calculate the Kalman Filter update
            # ----------------------------------
            # Calculate prediction of new observation
            # as well as forecast error of that prediction

            yhat = F.dot(theta)
            et = y[i] - yhat
            ets[i] = et[0]

            # Q_t is the variance of the prediction of
            # observations and hence \sqrt{Q_t} is the
            # standard deviation of the predictions
            Qt = F.dot(R).dot(F.T) + vt
            sqrt_Qt = np.sqrt(Qt)[0][0]

            Qts[i] = Qt
            sqrt_Qts[i] = sqrt_Qt

            # The posterior value of the states \theta_t is
            # distributed as a multivariate Gaussian with mean
            # m_t and variance-covariance C_t
            At = R.dot(F.T) / Qt
            theta = theta + At.flatten() * et
            C = R - At * F.dot(R)

            thetas[i] = theta[0]
            Ats[i] = At
            Cs[i] = C

        dfa['et'] = ets
        # df['et'] = df['et'].rolling(5).mean()
        dfa['sqrt_Qt'] = sqrt_Qts
        dfa['theta'] = thetas

        # run regression (including Kalman Filter) to find hedge ratio and then create spread series
        df1 = pd.DataFrame({
            'y': y,
            'x': x,
            'ts_x': ts_x,
            'ts_y': ts_y,
            'et': dfa['et'],
            'sqrt_Qt': dfa['sqrt_Qt'],
            'theta': dfa['theta']
        })[-param:]

        # df1[['et','sqrt_Qt']].plot()

        # df1['spread0'] = df1['y'] - df1['theta']*df1['x']

        # # calculate half life
        # halflife = half_life(df1['spread0'])

        # # calculate z-score with window = half life period
        # meanSpread = df1.spread0.rolling(window=halflife).mean()
        # stdSpread = df1.spread0.rolling(window=halflife).std()
        # df1['zScore'] = (df1.spread0-meanSpread)/stdSpread

        # #############################################################
        # # Trading logic
        # entryZscore = 0.8
        # exitZscore = 0.2

        #set up num units long
        # df1['long entry'] = ((df1.zScore < - entryZscore) & (df1.zScore.shift(1) > - entryZscore))
        # df1['long exit'] = ((df1.zScore > - exitZscore) & (df1.zScore.shift(1) < - exitZscore))

        threshold = error_level * 0.001
        df1['long entry'] = ((df1.et < -threshold) &
                             (df1.et.shift(1) > -threshold))
        df1['long exit'] = ((df1.et > 0) & (df1.et.shift(1) < 0))

        df1['num units long'] = np.nan
        df1.loc[df1['long entry'], 'num units long'] = 1
        df1.loc[df1['long exit'], 'num units long'] = 0
        df1['num units long'][0] = 0
        df1['num units long'] = df1['num units long'].fillna(method='pad')

        #set up num units short
        # df1['short entry'] = ((df1.zScore > entryZscore) & (df1.zScore.shift(1) < entryZscore))
        # df1['short exit'] = ((df1.zScore < exitZscore) & (df1.zScore.shift(1) > exitZscore))

        df1['short entry'] = ((df1.et > threshold) &
                              (df1.et.shift(1) < threshold))
        df1['short exit'] = ((df1.et < 0) & (df1.et.shift(1) > 0))

        df1.loc[df1['short entry'], 'num units short'] = -1
        df1.loc[df1['short exit'], 'num units short'] = 0
        df1['num units short'][0] = 0
        df1['num units short'] = df1['num units short'].fillna(method='pad')

        df1['numUnits'] = df1['num units long'] + df1['num units short']
        df1['signals'] = df1['numUnits'].diff()
        #df1['signals'].iloc[0] = df1['numUnits'].iloc[0]

        df1['yfrets'] = df1['y'].pct_change().shift(-1)
        df1['xfrets'] = df1['x'].pct_change().shift(-1)

        if trans_costs:
            df1['spread'] = (df1['y'] * (1 + df1['signals'] * df1['ts_y'])) - (
                df1['theta'] * (df1['x'] * (1 - df1['signals'] * df1['ts_x'])))
            #df1['spread'] = (df1['y']*(1+df1['signals']*0.0001))
        else:
            df1['spread'] = df1['y'] - df1['theta'] * df1['x']
            #df1['spread'] = df1['y']

        df1['spread pct ch'] = (df1['spread'] - df1['spread'].shift(1)) / (
            (df1['x'] * abs(df1['theta'])) + df1['y'])
        #df1['spread pct ch'] = (df1['spread'] - df1['spread'].shift(1)) / df1['spread'].shift(1)
        df1['port rets'] = df1['spread pct ch'] * df1['numUnits'].shift(1)

        df1['cum rets'] = df1['port rets'].cumsum()
        df1['cum rets'] = df1['cum rets'] + 1

        #df1 = df1.dropna()
        ##############################################################

        try:
            sharpe = ((df1['port rets'].mean() / df1['port rets'].std()) *
                      sqrt(252 * 2))
        except ZeroDivisionError:
            sharpe = 0.0

        ##############################################################
        start_val = 1
        end_val = df1['cum rets'].iloc[-1]

        # print(len(df1[df1['long entry']==True])+len(df1[df1['short entry']==True]))
        # print(end_val)

        start_date = df1.iloc[0].name
        end_date = df1.iloc[-1].name

        days = (end_date - start_date).days

        CAGR = round(((float(end_val) / float(start_val))**(252.0 / days)) - 1,
                     4)

        df1[s1 + " " + s2] = df1['cum rets']

        return df1[s1 + " " + s2], sharpe, CAGR

    results = []

    for pair in pairs:

        rets, sharpe, CAGR = backtest(dfa, dfb, param, pair[0], pair[1],
                                      error_level, trans_costs)  # match backtest's parameter order
        results.append(rets)
        #print("The pair {} and {} produced a Sharpe Ratio of {} and a CAGR of {}".format(pair[0],pair[1],round(sharpe,2),round(CAGR,4)))
        #rets.plot(figsize=(20,15),legend=True)

    #concatenate together the individual equity curves into a single DataFrame
    try:
        results_df = pd.concat(results, axis=1).dropna()

        #equally weight each equity curve by dividing each by the number of pairs held in the DataFrame
        results_df /= len(results_df.columns)

        #sum up the equally weighted equity curves to get our final equity curve
        final_res = results_df.sum(axis=1)

        #plot the chart of our final equity curve
        plt.figure()
        final_res.plot(figsize=(20, 15))
        plt.title('Between {} and {}'.format(
            str(results_df.index[0])[:10],
            str(results_df.index[-1])[:10]))
        plt.xlabel('Date')
        plt.ylabel('Returns')

        #calculate and print our some final stats for our combined equity curve
        try:
            sharpe = (final_res.pct_change().mean() /
                      final_res.pct_change().std()) * (sqrt(252 * 2))
        except ZeroDivisionError:
            sharpe = 0.0
        start_val = 1
        end_val = final_res.iloc[-1]

        start_date = final_res.index[0]
        end_date = final_res.index[-1]

        days = (end_date - start_date).days

        CAGR = round(((float(end_val) / float(start_val))**(252.0 / days)) - 1,
                     4)
        print("Sharpe Ratio is {} and CAGR is {}".format(
            round(sharpe, 2), round(CAGR, 4)))
    except ValueError:
        # return "No result"
        print('No pair found')
Example #40
import numpy as np
import sys
import matplotlib.pylab as plt

bins = np.linspace(-100, 100, 101)
std = np.sqrt(1000)
mu = 0.


def gauss(x, mu, sigma):
    val = np.exp(-(x - mu)**2 / (2. * sigma**2))
    val /= sigma * np.sqrt(2 * np.pi)
    return val


vgauss = np.vectorize(gauss, excluded=['mu', 'sigma'])
for fl in sys.argv[1:]:
    f = open(fl)
    values = []

    for line in f:
        values.append(float(line))

    plt.hist(values, bins=bins, histtype='step', density=True)  # 'normed' was removed from matplotlib

plt.plot(bins, vgauss(bins, mu, std), color='black', lw=2)
plt.xlabel(r'Terminal Walk Position', fontsize=16)
plt.ylabel("Normalized Counts", fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.show()
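# A hypothetical invocation: each input file holds one terminal walk position per
# line, e.g.  python walk_hist.py run1.txt run2.txt  (file names illustrative).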
Example #41
import sys
import matplotlib.pylab as plt

f = open(sys.argv[1], 'r')

while True:
    plot_name = f.readline()
    if not plot_name:  # readline() returns '' at end of file
        break

    plot_name = plot_name.rstrip()
    fig, ax = plt.subplots()

    while True:
        label = f.readline().rstrip()
        if not label:  # a blank line ends this plot's series
            break
        x = [float(n) for n in f.readline().rstrip().split(' ')]
        y = [float(n) for n in f.readline().rstrip().split(' ')]
        ax.loglog(x, y, label=label.rstrip())

    ax.legend()
    plt.title(plot_name)
    plt.xlabel('Array size')
    plt.ylabel('time (s)')
    plot_name = plot_name.replace(':', '').replace(' ', '_')
    plt.savefig(plot_name + ".png")
    plt.close()
Example #42
#%% Random Walk

import random
import matplotlib.pylab as plt
import numpy as np


def random_walk(n):
    x = 0
    c = [x]
    for i in range(n):
        prob = random.random()
        if prob > 0.5:
            x = x + 1
        elif prob < 0.5:
            x = x - 1
        c.append(x)
    return c


numberOfSteps = 100
walk = random_walk(numberOfSteps)

n = np.arange(numberOfSteps + 1)

plt.plot(n, walk, 'r-')
plt.plot(n, walk, 'bo')
plt.xlabel('Time (n)')
plt.ylabel('Position (x)')
plt.show()
Пример #43
0
# 4.3.2 Example of numerical differentiation
import numpy as np
import matplotlib.pylab as plt

def function_1(x):
    return 0.01 * x**2 + 0.1 * x
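
# numerical_diff is called below but not defined in this excerpt; a minimal
# central-difference sketch (an assumed, standard definition):
def numerical_diff(f, x):
    h = 1e-4  # small but not too small, to avoid floating-point rounding error
    return (f(x + h) - f(x - h)) / (2 * h)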


# Function that returns the tangent line to f at x
def tangent_line(f, x):
    d = numerical_diff(f, x)
    print(d)
    y = f(x) - d * x  # intercept: the tangent passes through (x, f(x)) with slope d
    return lambda t: d * t + y


x = np.arange(0.0, 20.0, 0.1)  # from 0 to 20 in steps of 0.1
y = function_1(x)
plt.xlabel("x")
plt.ylabel("f(x)")
plt.plot(x, y)
plt.show()

print(numerical_diff(function_1, 5))
print(numerical_diff(function_1, 10))

tf = tangent_line(function_1, 5)
y2 = tf(x)  # evaluate the tangent line at every x

plt.plot(x, y)
plt.plot(x, y2)  # tangent line
plt.show()
Пример #44
0
             y4,
             yerr=erry4,
             fmt='r^',
             ecolor='r',
             markeredgecolor='red',
             capthick=0.5,
             label="TOTEM")

plt.fill_between(px3, py3, py4, color='c')
plt.fill_between(px5, py5, py6, color='c')

plt.plot(px1, py1, 'k-', linewidth=1.0, label=r"$pp$")
plt.plot(px2, py2, 'k--', linewidth=1.0, label=r"$\bar{p}p$")

plt.xlabel(r"$\sqrt{s}$ [GeV]", fontdict=font)
plt.ylabel(r"$\sigma_{tot}(s)$ [mb]", fontdict=font)

leg = plt.legend(loc=4,
                 ncol=1,
                 shadow=False,
                 fancybox=False,
                 frameon=False,
                 numpoints=1)
leg.get_frame().set_alpha(0.5)

###########################################
sub_axes = plt.axes([.2, .55, .35, .35])
plt.grid(False)
plt.semilogx()
plt.xlim(5000, 15000)
plt.ylim(90, 120)
import matplotlib.pylab as plt
import seaborn as sns
from numpy import genfromtxt

data = genfromtxt("revised_AD_CNN_cross.csv", delimiter=",")

data[0, 0] = 0.93456
print(data[0, 0])

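# heatmap with a diverging colormap centered at 0.5, i.e. chance-level AUC
# (an assumption: the values plotted appear to be AUC scores)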
ax = sns.heatmap(data,
                 linewidth=0.01,
                 center=0.5,
                 cmap="PiYG",
                 cbar_kws={"ticks": [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]})

plt.xlabel("CNN model Index")
plt.ylabel("Target Index")
plt.tight_layout()

plt.savefig("revised_AD_cross_AUC.png", dpi=400)
plt.show()

import pandas as pd

data = pd.read_excel("revised_AD_CNN_cross.xlsx")

models = list(data.columns.values)

targets = list(data.index.values)

subsets = {
    "GPCR":
Пример #46
0
rho2_systemic = rho42_005[200:]
system_error2 = np.mean(rho2_systemic)
print("system error for dx=0.05 = ", system_error2)


ratio = np.power((0.01 / 0.05), -2)
print("Compare expected ratio = ", ratio, "with computed ratio = ", system_error1 / system_error2)


plot1 = plt.plot(rho_42_005, color='green', label=r'$\Delta x=0.05$')
plot2 = plt.plot(rho_42_001, color='blue', label=r'$\Delta x=0.01$')
plt.legend(bbox_to_anchor=(1, 1), numpoints=1)

plt.xlabel('m', fontsize = 16)
plt.ylabel(r'$\Delta \rho$', fontsize = 20)

plt.annotate(r'$ \frac{\mathbb{E} \left[ \rho_{(\Delta x=0.01)} \right] }{\mathbb{E} \left[ \rho_{(\Delta x=0.05)} \right] }= 1.2028$', xy=(800, 0), xytext=(200, -0.1), fontsize=20)
            

plt.savefig("results/rho_particle42_ratio1p2.pdf")
plt.show()
plot1 = plt.plot(data1, color='green', label=r'$\Delta x=0.01$')
plot2 = plt.plot(data2, label=r'$\Delta x=0.05$')
#plot1 =plt.plot(np.append(np.roll(value,1),value[9]) )
#plot1 =plt.plot( norm_Jv_fp )
#M=10
#norm_Jv_fp 
#average_Jv_norm = scipy.zeros((M))
#for m in range(len(Jv_norm)):
    
Пример #47
0
        fn = sArg[2:]
        bFilename = True
    #   grid on
    if "-G" in sArg:
        bGrid = True

#   filename is mandatory
if not bFilename:
    sys.exit(
        'Usage: python3 fpqSegmentAngleLengthCrossPlot.py -I<inputfilename>')

#   alternative method
nodelist = fpq.getNodes(fn)
xnodelist = nodelist[0]
ynodelist = nodelist[1]
segangle = fpq.getSegAngles(xnodelist, ynodelist)
seglengths = fpq.getSegLengths(xnodelist, ynodelist)
nSegs = len(segangle)

#   plot the segment length distribution
fig, ax = plt.subplots(figsize=(xSize, ySize))
plt.plot(segangle, seglengths, 'o')
plt.xlabel('Segment angle, degrees')
plt.ylabel('Segment length, pixels')
plt.xlim(0, 180)
plt.ylim(0, max(seglengths) * 1.05)
plt.grid(bGrid)
plt.title('Segment angles versus lengths, n=%i' % nSegs)
plt.savefig("fpqSegmentAngleLengthCrossPlot.png", dpi=600)

print('Plotted %5d segments, angles & lengths' % nSegs)
plt.text(0.245, yhaz2+yoff/5., '1/2475-year AEP', va='bottom',ha='right',fontsize=16)
plt.legend()

plt.grid(which='both')
 
# get x lims from haz curve 1
thaz = exp(interp(log(1e-4), log(sd1['poe_probs_annual'][::-1]), log(imls[::-1])))

# round up to the nearest 0.1
xmax = ceil(thaz / 0.1) * 0.1
plt.xlim([0, xmax])
plt.ylim([1e-4, .1])
plt.xlim([0, .25])
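# note: this hard-coded xlim overrides the data-driven xmax set just above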
ax.tick_params(labelsize=14)

plt.ylabel('Annual Probability of Exceedance', fontsize=16)

plt.xlabel(' '.join(('Mean', period, 'Hazard (g)')), fontsize=16)
                       
# adjust x axes
#fig.subplots_adjust(hspace=0.2)

# save
if period == 'PGA' or period == 'PGV':
    plt.savefig('_'.join(('nsha18_haz_curves', period +'.png')), format='png',bbox_inches='tight')
else:
    plt.savefig('_'.join(('nsha18_haz_curves','SA('+period+').png')), format='png',bbox_inches='tight')

plt.show()
Пример #49
0
                                 tick_params, xlim

    #f = figure(figsize=(10, 3))
    for i, folder in enumerate(folders):
        if folder.startswith("B0"): #or folder.startswith("P0")) and \
            if folder in ["B0010", "C0087", "C0093"]:
                continue
            print "Working on:", folder
            case = path.join(basefolder, folder)
            length, a = main(case, beta, smooth, stats, ratio)

            # Plot
            f = plot(length, a, linewidth=3, color=color[i])
            hold("on")

        ylabel("Area", fontsize=20)

    #### FOR AREA VARIATION PLOT ####
    tick_params(
        axis='y',          # changes apply to the y-axis
        which='both',      # both major and minor ticks are affected
        left=False,        # ticks along the left edge are off
        right=False,       # ticks along the right edge are off
        labelleft=False)   # labels along the left edge are off

    tick_params(
        axis='x',          # 'top' is an x-axis setting
        which='both',      # both major and minor ticks are affected
        top=False)         # ticks along the top edge are off

    tight_layout()
Пример #50
0
x[0] = 0
exact_x[0] = 0
v[0] = 0
a[0] = 0
F[0] = 0

## Loop
for i in range(len(t) - 1):
    b[i + 1] = b_C + u * t[i]

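    # slip only while the spring stretch beyond b_C can overcome static
    # friction (k*stretch > m*g*fr_s); otherwise the block sticks (stick-slip)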
    if (b[i] - x[i] - b_C) > (m * g * fr_s) / k:
        a[i + 1] = (k / m) * (b[i] - x[i] - b_C) - g * fr_d
        v[i + 1] = v[i] + dt * a[i + 1]
        F[i + 1] = k * (b[i] - x[i] - b_C)
    else:
        a[i + 1] = 0
        v[i + 1] = 0
        F[i + 1] = 0

    x[i + 1] = x[i] + dt * v[i + 1]
    exact_x[i + 1] = u * t[i] - (u / w) * np.sin(w * t[i])

plt.plot(t, F)
plt.xlabel("t")
plt.ylabel("Force")
plt.title("m = 1.0")
plt.legend(["Force"])  # only the force curve is plotted here
#plt.savefig("oppg_t_1.png")
plt.show()
Пример #51
0
eva_mean = eigvals_col.mean(axis=0)
data = np.load(join(figdir, "H_avg_1000cls.npz"))
eva_Havg, evc_Havg, Havg = data['eigvals_avg'], data['eigvects_avg'], data['H_avg']
#%% BigGAN
plt.figure(figsize=[5, 4])
eva00 = np.load(join(datadir, "eig_gen_z.npz"))["eva"]  #, H=H00, eva=eva00, evc=evc00)
plt.plot(np.log10(eva00 / eva00.max())[::-1], label="gen_z")
for blocki in range(Ln):
    eva00 = np.load(join(datadir, "eig_genBlock%02d.npz" % blocki))["eva"]
    plt.plot(np.log10(eva00 / eva00.max())[::-1], color=cm.jet((blocki+1) / (Ln+1)),
             label=("GenBlock%02d" % blocki) if blocki!=8 else "SelfAttention")
plt.plot(np.log10(eva_mean / eva_mean.max()), color=cm.jet((Ln+1) / (Ln+1)),
             label="GAN")
plt.xlim([0, len(eva00)])
plt.xlabel("eigenvalue rank")
plt.ylabel("log(eig / eig max)")
plt.title("BigGAN Hessian Spectra of\n Intermediate Layers Compared")
plt.subplots_adjust(top=0.9)
plt.legend()
plt.savefig(join(figdir, "spectrum_med_Layers_norm_cmp.png"))
plt.savefig(join(figdir, "spectrum_med_Layers_norm_cmp.pdf"))
plt.show()
#%% Collect the H and eva evc into list.
npzlabel = ["gen_z"]+["genBlock%02d"%blocki for blocki in range(Ln)]
layernames = ["gen_z"] + [("GenBlock%02d" % blocki) if blocki!=8 else "SelfAttention" for blocki in range(Ln)] \
                + ["Full"]
H_col = []
evc_col = []
eva_col = []
for label in npzlabel:
    data = np.load(join(datadir, "eig_%s.npz" % label))
Пример #52
0
print(elapsedtime, '  seconds')

# ~ ~ ~ ~ Plot Results ~ ~ ~ ~ ~ ~ #
lambd = x[3] * pixels**2 + x[4] * pixels + x[5]
plt.figure()
lsf = sumgaus(x, spec[:, 0], nsat)

#lsf = gaussian(spec[:,0],sig)
tau = x[0]
norm = x[1]
sig = x[2]
dxa = x[3]
dxb = x[4]
dxc = x[5]
amps = x[6:len(x)]
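# forward model: the template spectrum raised to an optical-depth-like power
# tau, then broadened by the LSF via FFT convolution (reading inferred from
# the parameter names above)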
model = fftconvolve(norm * (spec[:, 1]**tau), lsf, 'same')
modelinterp = interp1d(spec[:, 0], model)
plt.plot(lambd, realspec0, 'k', lambd, modelinterp(lambd), 'g')

plt.figure()
plt.plot(lambd, realspec0 - modelinterp(lambd), 'g')

plt.figure()
plt.plot(chi_ev)
plt.xlabel('N_iteration')
plt.ylabel('Function Value')

#best one
#array([  3.48537391e-01,   9.88941209e-01,   1.38819216e-02,
#        -3.95332754e-07,   1.10756971e-02,   8.16905624e+02])
import numpy as np
import matplotlib.pylab as plt
import matplotlib as mp
from matplotlib.backends.backend_pdf import PdfPages
from scipy.stats import beta

# mp.rc('font', family = 'serif', serif = 'cmr10')
mp.rcParams['mathtext.fontset'] = 'cm'
mp.rcParams.update({'font.size': 16})

a = 2
b = 2
x = np.linspace(0,1,1000)
u = np.ones(len(x))
u[0] = 0
u[-1] = 0

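# rescaling x -> 0.2*x squeezes the support to [0, 0.2]; multiplying the
# density by 5 = 1/0.2 keeps the paired distribution normalized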
with PdfPages('distance_distributions.pdf') as pdf:
    plt.plot(0.2 * x, 5 * beta.pdf(x, a, b), label="paired distribution")
    plt.plot(x, u, label="unpaired distribution")
    plt.xlabel("distance, $d$")
    plt.ylabel("probability density")
    plt.legend(loc="best")
    pdf.savefig(bbox_inches="tight")
plt.yticks(np.linspace(-2, 0, 5),fontsize=20)
plt.rc('xtick',labelsize=15)
plt.rc('ytick',labelsize=15)
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0))
ax.spines['bottom'].set_position(('data',0))

plt.legend(loc=4,fontsize=13)
plt.title('rho = 60um, w0 = 10um',fontsize=20)

plt.xlabel('w/rho',fontsize=20)
plt.ylabel('Qz',fontsize=20)
plt.grid()
plt.show()

MTP.table_parameter('x-axis w0 to 7rho', 'related to w', '0','0, rho, 2rho, 3rho', '30')

t_finish = time.time()

print(f"script executed in {t_finish-t_start:.2f} seconds")

Пример #55
0
    if pops[-1] < 0:
        RC0[-1, -1] = 0
        print("Predator reached extinction after {} iterations".format(i))
        break

# ensure time is not longer than the RC0 data
t_axis = range(len(RC0))

# plot it
f1 = p.figure()
p.plot(t_axis, RC0[:, 0], 'g-', label='Resource density')  # Plot
p.plot(t_axis, RC0[:, 1], 'b-', label='Consumer density')
p.grid()
p.legend(loc='best')
p.xlabel('Time')
p.ylabel('Population density')
p.title('Consumer-Resource population dynamics')

# save the figure as a pdf
f1.savefig('../Results/Plots/LV3_model.pdf')

# plot the second, phase-plane figure: the two densities above become the axes
f2 = p.figure()
p.plot(RC0[:, 0], RC0[:, 1], 'r-')  # Plot
p.grid()
p.xlabel('Resource density')
p.ylabel('Consumer density')
p.title('Consumer-Resource population dynamics')

# save as pdf
Пример #56
0
import warnings

import pandas as pd
import matplotlib.pylab as plt
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from pylab import boxplot

warnings.filterwarnings("ignore")
pvalue = []

#Read the csv file and get only the time series

series = pd.read_csv(r'C:\Git\sdl\code\dados\lag15Min-BVMF3.csv')
series.drop('Unnamed: 0', axis=1, inplace=True)
series.drop('INSTRUMENT', axis=1, inplace=True)
series.drop('TRADE_TIME', axis=1, inplace=True)
print(series.head())
plt.plot(series)
plt.title('PETR4 May fluctuation - lag 15min')
plt.xlabel('Time (min)')
plt.ylabel('Price (Brazilian Reais)')
plt.show()

#Plot correlation and autocorrelation plots to analyze how the numbers relate

print('ACF and PACF for the original series')
pyplot.figure()
plot_acf(series,
         ax=pyplot.gca(),
         lags=20,
         title='Autocorrelation plot for original series')
pyplot.show()
pyplot.figure()
plot_pacf(series,
          ax=pyplot.gca(),
          lags=20,
Пример #57
0
import numpy as np
import scipy.fft as sfft
import matplotlib.pylab as pl
import matplotlib.gridspec as gridspec
from numpy.random import default_rng


def plot_traces(traces, fs, n, dataset, filename, rand=True, pre_traces=None):
    # Data len
    N = traces.shape[1]

    # Time axis for signal plot
    t_ax = np.arange(N) / fs

    # Frequency axis for the shifted FFT plot (bin spacing is fs/N, not 1/fs)
    xf = sfft.fftshift(sfft.fftfreq(N, d=1 / fs))

    # Traces to plot
    trtp = []

    if rand:
        # Init rng
        rng = default_rng()

        # Traces to plot numbers
        trtp_ids = rng.choice(len(traces), size=n, replace=False)
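        # replace=False guarantees n distinct trace indices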
        trtp_ids.sort()

    else:
        trtp_ids = pre_traces

    # Retrieve the selected traces (identical for both branches)
    for idx, trace in enumerate(traces):
        if idx in trtp_ids:
            trtp.append(trace)

    # Plot traces in trtp with their spectrum
    for idx, trace in enumerate(trtp):
        yf = sfft.fftshift(sfft.fft(trace))

        gs = gridspec.GridSpec(2, 2)

        pl.figure()
        pl.subplot(gs[0, :])
        pl.plot(t_ax, trace)
        pl.title(
            f'{dataset} trace and spectrum #{trtp_ids[idx]}, file {filename}')
        pl.xlabel('Time [s]')
        pl.ylabel('Amplitude [-]')
        pl.grid(True)

        pl.subplot(gs[1, 0])
        pl.plot(xf, np.abs(yf) / np.max(np.abs(yf)))
        pl.xlabel('Frequency [Hz]')
        pl.ylabel('Amplitude [-]')
        pl.grid(True)

        pl.subplot(gs[1, 1])
        pl.plot(xf, np.abs(yf) / np.max(np.abs(yf)))
        pl.xlim(-25, 25)
        pl.xlabel('Frequency [Hz]')
        pl.ylabel('Amplitude [-]')
        pl.grid(True)
        pl.tight_layout()
        pl.savefig(f'Imgs/{dataset}/{filename}/{trtp_ids[idx]}.png')
Пример #58
0
 def run_opt(self, popsize, numgen, processors,
             plot=False, log=False, **kwargs):
     """
     Runs the optimizer.
     :param popsize:
     :param numgen:
     :param processors:
     :param plot:
     :param log:
     :param kwargs:
     :return:
     """
     self._params['popsize'] = popsize
     self._params['numgen'] = numgen
     self._params['processors'] = processors
     self._params['plot'] = plot
     self._params['log'] = log
     # allows us to pass in additional arguments e.g. neighbours
     self._params.update(**kwargs)
     self.halloffame = tools.HallOfFame(1)
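      # HallOfFame(1) keeps only the single best individual seen during the run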
     self.stats = tools.Statistics(lambda thing: thing.fitness.values)
     self.stats.register("avg", numpy.mean)
     self.stats.register("std", numpy.std)
     self.stats.register("min", numpy.min)
     self.stats.register("max", numpy.max)
     self.logbook = tools.Logbook()
     self.logbook.header = ["gen", "evals"] + self.stats.fields
     self._params['model_count'] = 0
     start_time = datetime.datetime.now()
     self.initialize_pop()
     for g in range(self._params['numgen']):
         self.update_pop()
         self.halloffame.update(self.population)
         self.logbook.record(gen=g, evals=self._params['evals'],
                             **self.stats.compile(self.population))
         print(self.logbook.stream)
     end_time = datetime.datetime.now()
     time_taken = end_time - start_time
     self._params['time_taken'] = time_taken
     print("Evaluated {0} models in total".format(
         self._params['model_count']))
     print("Best fitness is {0}".format(self.halloffame[0].fitness))
     print("Best parameters are {0}".format(self.parse_individual(
         self.halloffame[0])))
     for i, entry in enumerate(self.halloffame[0]):
         if entry > 0.95:
             print(
                 "Warning! Parameter {0} is at or near maximum allowed "
                 "value\n".format(i + 1))
         elif entry < -0.95:
             print(
                 "Warning! Parameter {0} is at or near minimum allowed "
                 "value\n".format(i + 1))
     if self._params['log']:
         self.log_results()
     if self._params['plot']:
         print('----Minimisation plot:')
         plt.figure(figsize=(5, 5))
         plt.plot(range(len(self.logbook.select('min'))),
                  self.logbook.select('min'))
         plt.xlabel('Iteration', fontsize=20)
         plt.ylabel('Score', fontsize=20)
Пример #59
0
]
#plt.plot(x,y)

#plt.show()

###################
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
z = np.zeros((len(u), len(v)))


def mapFeatureForPlotting(X1, X2):
    degree = 6
    out = np.ones(1)
    for i in range(1, degree + 1):
        for j in range(i + 1):
            out = np.hstack(
                (out, np.multiply(np.power(X1, i - j), np.power(X2, j))))
    return out


for i in range(len(u)):
    for j in range(len(v)):
        z[i, j] = np.dot(mapFeatureForPlotting(u[i], v[j]), theta.x)

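# the level-0 contour of the fitted polynomial is the decision boundary; note
# that plt.contour reads z as z[row=y, col=x], so z.T may be needed depending
# on the intended orientation of u and v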
plt.contour(u, v, z, 0)
plt.xlabel('Microchip Test1')
plt.ylabel('Microchip Test2')
#plt.legend((passed, failed), ('Passed', 'Failed'))
plt.show()
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])


class AccuracyHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.acc = []

    def on_epoch_end(self, epoch, logs={}):  # called once per epoch
        self.acc.append(logs.get('acc'))


history = AccuracyHistory()

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test),
          callbacks=[history])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
plt.plot(range(1, epochs + 1), history.acc)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()