Example #1
def compareTwoUsers(data1, data2, outdir):
    """Compares data for two users. Currently plots difference in peaks for
  the users on presslengths for different keycodes."""

    def computePeakDifference(d1, d2):
        edges = findCommonEdges(d1, d2)
        h1, e = np.histogram(d1, bins=edges, density=True)
        h2, e = np.histogram(d2, bins=edges, density=True)
        a1, a2 = np.argmax(h1), np.argmax(h2)
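        # peak difference = distance between the midpoints of the two modal bins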
        diff = (edges[a1] + edges[a1 + 1] - edges[a2] - edges[a2 + 1]) / 2.0
        return diff

    commonKeys = set(data1.keystrokePLs_key.keys()) & set(data2.keystrokePLs_key.keys())
    peakDiffs = []
    for key in commonKeys:
        dat1 = data1.keystrokePLs_key[key]
        dat2 = data2.keystrokePLs_key[key]
        peakDiffs.append(computePeakDifference(dat1, dat2))
    peakDiffs.append(computePeakDifference(data1.keystrokePLs, data2.keystrokePLs))
    edges = findCommonEdges(peakDiffs)
    plt.figure()
    plt.hist(peakDiffs, bins=edges)
    plt.title("Peak Differences for Keystroke PL for %s and %s" % (data1.user, data2.user))
    plt.xlabel("Time (seconds)")
    plt.savefig("%s/%s_%s_kPLpeakDiff.pdf" % (outdir, data1.user, data2.user))
    plt.close()
Example #2
    def plot_field(self,x,y,u=None,v=None,F=None,contour=False,outdir=None,plot='quiver',figname='_field',format='eps'):
        outdir = self.set_dir(outdir)
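        # p: subsampling stride used to thin the field before quiver plotting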
        p = 64
        if F is None: F=self.calc_F(u,v)

        plt.close('all')
        plt.figure()
        #fig, axes = plt.subplots(nrows=1)
        if contour:
            # plt.hold was removed from matplotlib; overplotting is now the default
            plt.contourf(x,y,F)
        if plot=='quiver':
            plt.quiver(x[::p],y[::p],u[::p],v[::p],scale=0.1)
        
        if plot=='pcolor':
            plt.pcolormesh(x[::4],y[::4],F[::4],cmap=plt.cm.Pastel1)
            plt.colorbar()

        if plot=='stream':
            speed = F[::16]
            plt.streamplot(x[::16], y[::16], u[::16], v[::16], density=(1,1),color='k')

        plt.xlabel('$x$ (a.u.)')
        plt.ylabel('$y$ (a.u.)')
        
        plt.savefig(os.path.join(outdir,figname+'.'+format),format=format,dpi=320,bbox_inches='tight')
        plt.close()
Example #3
def plot_feat_hist(data_name_list, filename=None):
    if len(data_name_list)>1:
        assert filename is not None

    num_rows = 1 + (len(data_name_list) - 1) // 2
    num_cols = 1 if len(data_name_list) == 1 else 2
    pylab.figure(figsize=(5 * num_cols, 4 * num_rows))

    for i in range(num_rows):
        for j in range(num_cols):
            pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
            x, name = data_name_list[i * num_cols + j]
            pylab.title(name)
            pylab.xlabel('Value')
            pylab.ylabel('Fraction')
            # the histogram of the data
            max_val = np.max(x)
            if max_val <= 1.0:
                bins = 50
            elif max_val > 50:
                bins = 50
            else:
                bins = max_val
            n, bins, patches = pylab.hist(
                x, bins=bins, density=True, facecolor='blue', alpha=0.75)

            pylab.grid(True)

    if not filename:
        filename = "feat_hist_%s.png" % name.replace(" ", "_")

    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
Example #4
def plot_experiment_stats(e):
    sample_data = np.where(e.num_test_genotypes(SAMPLE) > 0)[0]
    c_sample = (100.0 * e.called(SAMPLE)[sample_data]) / e.num_test_genotypes(SAMPLE)[sample_data] + 1e-15
    fill = 100.*e.fill[sample_data]

    snp_data = np.where(e.num_test_genotypes(SNP) > 0)[0]
    c_snp = (100.0 * e.called(SNP)[snp_data]) / e.num_test_genotypes(SNP)[snp_data]
    
    # Call % vs. fill %
    P.figure(1)
    P.clf()
    P.plot(fill, c_sample, 'o')
    P.xlabel('Fill %')
    P.ylabel('Call %')
    P.title('Validation Breakdown by Sample, %.2f%% Deleted. r = %.2f' % 
              (100.0 * e.fraction, np.corrcoef(fill + SMALL_FLOAT, c_sample + SMALL_FLOAT)[0, 1],))

    # Call % vs. SNP
    P.figure(2)
    P.clf()
    P.plot(snp_data, c_snp, 'o')
    P.xlabel('SNP #')
    P.ylabel('Call %')
    P.title('Validation Breakdown by SNP, %.2f%% Deleted' % (100.0 * e.fraction,))
    
    return (np.array([snp_data, c_snp]).transpose(),
            np.array([sample_data, c_sample, fill]).transpose())
Example #5
    def analyze_categorical_feature(self, data_frame, feature_name):
        data = FeatureAnalyzer.convert_into_one_hot_representation(data_frame, feature_name)
        data.is_copy = False
        data['label'] = data['label'].astype(int).apply(lambda x: 0 if x == -1 else 1)
        result_dict = {'category': [], 'correlation': [], 'abs_correlation': [], 'fraction': [], 'rate': []}
        for key in data.keys():
            if key != 'label':
                result_dict['category'].append(key)
                key_data = data[['label', key]][data[key] == 1]
                num_engagements = len(key_data[key_data['label'] == 1])
                if len(key_data) > 0:
                    result_dict['correlation'].append(data[[key, 'label']].corr().values[0, 1])
                    result_dict['abs_correlation'].append(abs(result_dict['correlation'][-1]))
                    result_dict['fraction'].append(len(key_data)/float(len(data)))
                    result_dict['rate'].append(num_engagements/float(len(key_data)))
                else:
                    result_dict['correlation'].append(0.0)
                    result_dict['abs_correlation'].append(0.0)
                    result_dict['fraction'].append(0.0)
                    result_dict['rate'].append(0.0)

        plt.figure()
        result = pandas.DataFrame(data=result_dict).sort_values(by='abs_correlation', ascending=False)
        fig = result.plot(x='category', y=['correlation', 'fraction', 'rate'], kind='bar', stacked=True, figsize=(len(data)/200, 10)).get_figure()
        fig.set_tight_layout(True)
        png_path = 'Data/' + self.experiment_name + '_' + feature_name + '.png'
        fig.savefig(png_path, dpi=fig.dpi)
Example #6
 def ROC(self, x):
     """
     ROC curve for separating the positive Gamma distribution
     from two other modes, predicted by current parameter values
     -x: vector of observations
     
     Output: P
     P[0]: False positive rates
     P[1]: True positive rates
     """
     import matplotlib.pylab as mp
     p = len(x)
     P = np.zeros((2,p))
     #False positives
     P[0] = (self.mixt[0]*st.gamma.cdf(-x,self.shape_n,scale=self.scale_n)
              + self.mixt[1]*st.norm.sf(x,0,np.sqrt(self.var)))/\
              (self.mixt[0] + self.mixt[1])
     #True positives
     P[1] = st.gamma.sf(x,self.shape_p,scale=self.scale_p)
     mp.figure()
     I = P[0].argsort()
     mp.plot(P[0,I],P[0,I],'r-')
     mp.plot(P[0,I],P[1,I],'g-')
     mp.legend(('False positive rate','True positive rate'))
     return P
Example #7
def make_corr1d_fig(dosave=False):
    corr = make_corr_both_hemi()
    lw=2; fs=16
    pl.figure(1)#, figsize=(8, 7))
    pl.clf()
    pl.xlim(4,300)
    pl.ylim(-400,+500)    
    lambda_titles = [r'$20 < \lambda < 30$',
                     r'$30 < \lambda < 40$',
                     r'$\lambda > 40$']
    colors = ['blue','green','red']
    for i in range(3):
        corr1d, rcen = corr_1d_from_2d(corr[i])
        # ipdb.set_trace()  # debug breakpoint from the original, disabled so the loop runs through
        pl.semilogx(rcen, corr1d*rcen**2, lw=lw, color=colors[i])
        #pl.semilogx(rcen, corr1d*rcen**2, 'o', lw=lw, color=colors[i])
    pl.xlabel(r'$s (Mpc)$',fontsize=fs)
    pl.ylabel(r'$s^2 \xi_0(s)$', fontsize=fs)    
    pl.legend(lambda_titles, loc='lower left', fontsize=fs+3)
    pl.plot([.1,10000],[0,0],'k--')
    s_bao = 149.28
    pl.plot([s_bao, s_bao],[-9e9,+9e9],'k--')
    pl.text(s_bao*1.03, 420, 'BAO scale')
    pl.text(s_bao*1.03, 370, '%0.1f Mpc'%s_bao)
    if dosave: pl.savefig('xi1d_3bin.pdf')
Example #8
    def check_models(self):
        '''
        Displays a plot of the models against that taken from a
        respected website (https://www.pvlighthouse.com.au/)
        '''
        plt.figure('Intrinsic bandgap')
        t = np.linspace(1, 500)

        for author in self.available_models():

            Eg = self.update(temp=t, author=author, multiplier=1.0)
            plt.plot(t, Eg, label=author)

        test_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'Si', 'check data', 'iBg.csv')

        data = np.genfromtxt(test_file, delimiter=',', names=True)

        for temp, name in zip(data.dtype.names[0::2], data.dtype.names[1::2]):
            plt.plot(
                data[temp], data[name], '--', label=name)

        plt.xlabel('Temperature (K)')
        plt.ylabel('Intrinsic Bandgap (eV)')

        plt.legend(loc=0)
        self.update(temp=0, author=author, multiplier=1.01)
Example #9
    def show(self, x):
        """
        vizualisation of the mm based on the empirical histogram of x

        Parameters
        ----------
        x: array of shape(nbitems): the data to be processed
        """
        step = 3.5*np.std(x)/np.exp(np.log(np.size(x))/3)
        bins = max(10,int((x.max()-x.min())/step))
        h,c = np.histogram(x, bins)
        h = h.astype(float)/np.size(x)
        p = self.mixt
        
        dc = c[1]-c[0]
        y = (1-p)*_gaus_dens(self.mean,self.var,c)*dc
        z = np.zeros(np.size(c))
        i = np.ravel(np.nonzero(c>0))
        z = _gam_dens(self.shape,self.scale,c)*p*dc
        
        import matplotlib.pylab as mp
        mp.figure()
        mp.plot(0.5 *(c[1:] + c[:-1]),h)
        mp.plot(c,y,'r')
        mp.plot(c,z,'g')
        mp.plot(c,z+y,'k')
        mp.title('Fit of the density with a Gamma-Gaussians mixture')
        mp.legend(('data','gaussian component','gamma component',
                   'mixture distribution'))
Example #10
    def show(self, rescale=True, ax=None):
        """Visualization of a design matrix

        Parameters
        ----------
        rescale: bool, optional
                 rescale columns magnitude for visualization or not
        ax: figure handle, optional

        Returns
        -------
        ax, figure handle
        """
        x = self.matrix.copy()
        if rescale:
            x = x / np.sqrt(np.sum(x ** 2, 0))

        import matplotlib.pylab as mp
        if ax is None:
            mp.figure()
            ax = mp.subplot(1, 1, 1)

        ax.imshow(x, interpolation='nearest', aspect='auto')
        ax.set_xlabel('conditions')
        ax.set_ylabel('scan number')

        if self.names is not None:
            ax.set_xticks(range(len(self.names)))
            ax.set_xticklabels(self.names, rotation=60, ha='right')
        return ax
Example #11
    def test1():
        x = [0.5]*3
        xbounds = [(-5, 5) for y in x]


        GA = GenAlg(fitcalc1, x, xbounds, popMult=100, bitsPerGene=9, mutation=(1./9.), crossover=0.65, crossN=2, direction='min', maxGens=60, hammingDist=False)
        results = GA.run()
        print "*** DONE ***"
        #print results
        plt.ioff()
        #generate pareto frontier numerically
        x1_ = np.arange(-5., 0., 0.05)
        x2_ = np.arange(-5., 0., 0.05)
        x3_ = np.arange(-5., 0., 0.05)

        pfn = []
        for x1 in x1_:
            for x2 in x2_:
                for x3 in x3_:
                    pfn.append(fitcalc1([x1,x2,x3]))

        pfn.sort(key=lambda x:x[0])
        
        plt.figure()
        i = 0
        for x in results:
            plt.scatter(x[1][0], x[1][1], 20, c='r')

        plt.scatter([x[0] for x in pfn], [x[1] for x in pfn], 1.0, c='b', alpha=0.1)
        plt.xlim([-20,-1])
        plt.ylim([-12, 2])
        plt.draw()
Example #12
def viz_docwordfreq_sidebyside(P1, P2, title1='', title2='',
                                vmax=None, aspect=None, block=False):
  from matplotlib import pylab
  pylab.figure()

  if vmax is None:
    vmax = 1.0
    P1limit = np.percentile(P1.flatten(), 97)
    if P2 is not None:
      P2limit = np.percentile(P2.flatten(), 97)
    else:
      P2limit = P1limit
    while vmax > P1limit and vmax > P2limit:
      vmax = 0.8 * vmax

  if aspect is None:
    aspect = float(P1.shape[1])/P1.shape[0]
  pylab.subplot(1, 2, 1)
  pylab.imshow(P1, aspect=aspect, interpolation='nearest', vmin=0, vmax=vmax)
  if len(title1) > 0:
    pylab.title(title1)
  if P2 is not None:
    pylab.subplot(1, 2, 2)
    pylab.imshow(P2, aspect=aspect, interpolation='nearest', vmin=0, vmax=vmax)
    if len(title2) > 0:
      pylab.title(title2)
  pylab.show(block=block)
Example #13
    def test_flux(self):
        tol = 150.
        inputcat = catalog.read(os.path.join(self.args.tmp_path, 'ccd_1.cat'))
        pixradius = 3*self.target["psf"]/self.instrument["PIXEL_SCALE"]
        positions = list(zip(inputcat["X_IMAGE"]-1, inputcat["Y_IMAGE"]-1))
        fluxes = image.simple_aper_phot(self.im[1], positions, pixradius)
        sky_background = image.annulus_photometry(self.im[1], positions,
        	pixradius+5, pixradius+8)

        total_bg_pixels = np.shape(image.build_annulus_mask(pixradius+5, pixradius+8, positions[0]))[1]
        total_source_pixels = np.shape(image.build_circle_mask(pixradius,
        	positions[0]))[1]

        estimated_fluxes = fluxes - sky_background*1./total_bg_pixels*total_source_pixels

        estimated_magnitude = image.flux2mag(estimated_fluxes,
        	self.im[1].header['SIMMAGZP'], self.target["exptime"])

        expected_flux = image.mag2adu(17.5, self.target["zeropoint"][0],
        	exptime=self.target["exptime"])

        p.figure()
        p.hist(fluxes, bins=50)
        p.title('Expected flux: {:0.2f}, mean flux: {:1.2f}'.format(expected_flux, np.mean(estimated_fluxes)))
        p.savefig(os.path.join(self.figdir,'Fluxes.png'))

        assert np.all(np.abs(fluxes-expected_flux) < tol)
Example #14
    def test_2d_esmf_conserv(self):
        print('running test_2d_esmf_conserv...')
        f = cdms2.open(sys.prefix + \
                           '/sample_data/so_Omon_ACCESS1-0_historical_r1i1p1_185001-185412_2timesteps.nc')
        so = f('so')[0, 0, :, :]
        clt = cdms2.open(sys.prefix + '/sample_data/clt.nc')('clt')[0, :, :]
        tic = time.time()
        soInterp = so.regrid(clt.getGrid(), regridTool='ESMF', regridMethod='Conservative')
        soInterpInterp = soInterp.regrid(so.getGrid(), regridTool='ESMF', 
                                         regridMethod='Conservative')
        toc = time.time()
        print('time to interpolate (ESMF conservative) forward/backward: ', toc - tic)
        from functools import reduce  # reduce is not a builtin on Python 3
        ntot = reduce(operator.mul, so.shape)
        avgdiff = numpy.sum(so - soInterpInterp) / float(ntot)
        print('avgdiff = ', avgdiff)

        if PLOT:
            pylab.figure(2)
            pylab.subplot(2, 2, 1)
            pylab.pcolor(so, vmin=20.0, vmax=40.0)
            pylab.colorbar()
            pylab.title('ESMF conserve regrid: so')
            pylab.subplot(2, 2, 2)
            pylab.pcolor(soInterp, vmin=20.0, vmax=40.0)
            pylab.colorbar()
            pylab.title('ESMF conserve regrid: soInterp')
            pylab.subplot(2, 2, 3)
            pylab.pcolor(soInterpInterp, vmin=20.0, vmax=40.0)
            pylab.colorbar()
            pylab.title('ESMF conserve regrid: soInterpInterp')
            pylab.subplot(2, 2, 4)
            pylab.pcolor(so - soInterpInterp, vmin=-0.5, vmax=0.5)
            pylab.colorbar()
            pylab.title('ESMF conserve regrid: error')
Example #15
def plot_grid_experiment_results(grid_results, params, metrics):
    global plt
    params = sorted(params)
    grid_params = grid_results.grid_params
    plt.figure(figsize=(8, 6))
    for metric in metrics:
        grid_params_shape = [len(grid_params[k]) for k in sorted(grid_params.keys())]
        params_max_out = [(1 if k in params else 0) for k in sorted(grid_params.keys())]
        results = np.array([e.results.get(metric, 0) for e in grid_results.experiments])
        results = results.reshape(*grid_params_shape)
        for axis, included_in_params in enumerate(params_max_out):
            if not included_in_params:
                results = np.apply_along_axis(np.max, axis, results)

        print(results)
        params_shape = [len(grid_params[k]) for k in sorted(params)]
        results = results.reshape(*params_shape)

        if len(results.shape) == 1:
            results = results.reshape(-1,1)
        import matplotlib.pylab as plt

        #f.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
        plt.imshow(results, interpolation='nearest', cmap=plt.cm.hot)
        plt.title(str(grid_results.name) + " " + metric)

        if len(params) == 2:
            plt.xticks(np.arange(len(grid_params[params[1]])), grid_params[params[1]], rotation=45)
        plt.yticks(np.arange(len(grid_params[params[0]])), grid_params[params[0]])
        plt.colorbar()
        plt.show()
Example #16
def plot_feat_hist(data_name_list, filename=None):
    pylab.clf()
    # import pdb;pdb.set_trace()
    num_rows = 1 + (len(data_name_list) - 1) // 2
    num_cols = 1 if len(data_name_list) == 1 else 2
    pylab.figure(figsize=(5 * num_cols, 4 * num_rows))

    for i in range(num_rows):
        for j in range(num_cols):
            pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
            x, name = data_name_list[i * num_cols + j]
            pylab.title(name)
            pylab.xlabel('Value')
            pylab.ylabel('Density')
            # the histogram of the data
            max_val = np.max(x)
            if max_val <= 1.0:
                bins = 50
            elif max_val > 50:
                bins = 50
            else:
                bins = max_val
            n, bins, patches = pylab.hist(
                x, bins=bins, density=True, facecolor='green', alpha=0.75)

            pylab.grid(True)

    if not filename:
        filename = "feat_hist_%s.png" % name

    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
Example #17
    def visualization2(self, sp_to_vis=None):
        if sp_to_vis:
            species_ready = list(set(sp_to_vis).intersection(self.all_sp_signatures.keys()))
        else:
            raise Exception('list of driver species must be defined')

        if not species_ready:
            raise Exception('None of the input species is a driver')

        for sp in species_ready:
            # Setting up figure
            plt.figure()
            plt.subplot(313)

            mon_val = OrderedDict()
            signature = self.all_sp_signatures[sp]
            for idx, mon in enumerate(list(set(signature))):
                if mon[0] == 'C':
                    mon_val[self.all_comb[sp][mon] + (-1,)] = idx
                else:
                    mon_val[self.all_comb[sp][mon]] = idx

            mon_rep = [0] * len(signature)
            for i, m in enumerate(signature):
                if m[0] == 'C':
                    mon_rep[i] = mon_val[self.all_comb[sp][m] + (-1,)]
                else:
                    mon_rep[i] = mon_val[self.all_comb[sp][m]]
            # mon_rep = [mon_val[self.all_comb[sp][m]] for m in signature]

            y_pos = numpy.arange(len(mon_val.keys()))
            plt.scatter(self.tspan[1:], mon_rep)
            plt.yticks(y_pos, mon_val.keys())
            plt.ylabel('Monomials', fontsize=16)
            plt.xlabel('Time(s)', fontsize=16)
            plt.xlim(0, self.tspan[-1])
            plt.ylim(0, max(y_pos))

            plt.subplot(312)

            for name in self.model.odes[sp].as_coefficients_dict():
                mon = name
                mon = mon.subs(self.param_values)
                var_to_study = [atom for atom in mon.atoms(sympy.Symbol)]
                arg_f1 = [numpy.maximum(self.mach_eps, self.y[str(va)][1:]) for va in var_to_study]
                f1 = sympy.lambdify(var_to_study, mon)
                mon_values = f1(*arg_f1)
                mon_name = str(name).partition('__')[2]
                plt.plot(self.tspan[1:], mon_values, label=mon_name)
            plt.ylabel('Rate(m/sec)', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.1, 0.85), loc='upper right', ncol=1)

            plt.subplot(311)
            plt.plot(self.tspan[1:], self.y['__s%d' % sp][1:], label=parse_name(self.model.species[sp]))
            plt.ylabel('Molecules', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.15, 0.85), loc='upper right', ncol=1)
            plt.suptitle('Tropicalization' + ' ' + str(self.model.species[sp]))

            # plt.show()
            plt.savefig('s%d' % sp + '.png', bbox_inches='tight', dpi=400)
Example #18
    def plot_corner_posteriors(self, savefile=None, labels=["T1", "R1", "Av", "T2", "R2"]):
        '''
        Plots the corner plot of the MCMC results.
        '''
        ndim = len(self.sampler.flatchain[0,:])
        chain = self.sampler
        samples = chain.flatchain
        
        samples = samples[:,0:ndim]  
        plt.figure(figsize=(8,8))
        fig = corner.corner(samples, labels=labels[0:ndim])
        plt.title("MJD: %.2f"%self.mjd)
        name = self._get_save_path(savefile, "mcmc_posteriors")
        plt.savefig(name)
        plt.close("all")
        

        plt.figure(figsize=(8,ndim*3))
        for n in range(ndim):
            plt.subplot(ndim,1,n+1)
            chain = self.sampler.chain[:,:,n]
            nwalk, nit = chain.shape
            
            for i in np.arange(nwalk):
                plt.plot(chain[i], lw=0.1)
                plt.ylabel(labels[n])
                plt.xlabel("Iteration")
        name_walkers = self._get_save_path(savefile, "mcmc_walkers")
        plt.tight_layout()
        plt.savefig(name_walkers)
        plt.close("all")  
Example #19
def plot_peaks(spectra, ref, zl, pl, filename=''):
  plt.figure()
  plt.title("Debug: Peak fitting for '%s'" % filename)
  plt.xlabel("y-position [px]")
  plt.ylabel("Intensity")

  Nspectra, Npx = spectra.shape

  for s in range(Nspectra):
    scale = 1./spectra.max()
    offset= -s*0.1

    # plot data
    plt.plot(spectra[s]*scale + offset,'k',linewidth=2)

    # plot first peak
    p,A,w = zl[s]
    x = np.arange(-2*w, 2*w) + p
    plt.plot(x,gauss(x,*zl[s])*scale + offset,'r')

    # plot second peak
    if ref is not None:
      p,A   = pl[s]
      x = np.arange(len(ref)) - len(ref)/2 + p
      plt.plot(x,ref/ref.max()*A*scale + offset,'g')
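
A minimal smoke test for plot_peaks (my sketch, not from the source; it assumes the module's gauss(x, p, A, w) helper and the usual np/plt imports):

if __name__ == '__main__':
  spectra = np.abs(np.random.randn(3, 200))
  zl = [(100.0, 1.0, 5.0)] * 3  # (position, amplitude, width) per spectrum
  plot_peaks(spectra, None, zl, None, filename='demo')  # ref=None skips the second-peak branch
  plt.show()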
Example #20
def check_isometry(G, chart, nseeds=100, verbose = 0):
    """
    A simple check of the Isometry:
    look whether the output distance match the intput distances
    for nseeds points
    
    Returns
    -------
    a scaling factor between the proposed and the true metrics
    """
    nseeds = np.minimum(nseeds, G.V)
    aux = np.argsort(nr.rand(nseeds))
    seeds =  aux[:nseeds]
    dY = Euclidian_distance(chart[seeds],chart)
    dx = G.floyd(seeds)

    dY = np.reshape(dY,np.size(dY))
    dx = np.reshape(dx,np.size(dx))

    if verbose:
        import matplotlib.pylab as mp
        mp.figure()
        mp.plot(dx,dY,'.')
        mp.show()

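    # least-squares scale factor relating graph distances (dx) to chart distances (dY)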
    scale = np.dot(dx,dY)/np.dot(dx,dx)
    return scale
Example #21
def infer_latent_dim(X, verbose=0, maxr=-1):
    """
    r = infer_latent_dim(X, verbose=0)
    Infer the latent dimension of an aray assuming data+gaussian noise mixture
    
    Parameters
    ----------
    array X, data whose deimsnionhas to be inferred
    verbose=0, int, verbosity level
    maxr=-1, int, maximum dimension that can be achieved
             if maxr = -1, this is equal to rank(X)
    
    Returns
    -------
    r, int, the inferred dimension
    """
    if maxr ==-1:
        maxr = np.minimum(X.shape[0],X.shape[1])
        
    U,S,V = nl.svd(X,0)
    if verbose>1:
        print "Singular Values", S
    L = []
    for k in range(maxr):
        L.append(_linear_dim_criterion_(S,k,X.shape[1],X.shape[0])/X.shape[0])

    L = np.array(L)
    rank = np.argmax(L)
    if verbose:
        import matplotlib.pylab as mp
        mp.figure()
        mp.bar(np.arange(maxr),L-L.mean())

    return rank
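
A quick synthetic check (my sketch, not from the source; it assumes nl is numpy.linalg and _linear_dim_criterion_ is defined in the same module, as the code above implies):

rng = np.random.RandomState(0)
X = rng.randn(200, 3).dot(rng.randn(3, 50))  # rank-3 signal
X += 0.01 * rng.randn(200, 50)               # plus a little gaussian noise
print(infer_latent_dim(X))                   # expected to land near the true rank of 3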
Example #22
def _fig_density(sweight, surweight, pval, nlm):
    """
    Plot the histogram of sweight across the image
    and the thresholds implied by the surrogate model (surweight)
    """
    import matplotlib.pylab as mp
    # compute some thresholds
    nlm = nlm.astype('d')
    srweight = np.sum(surweight,1)
    srw = np.sort(srweight)
    nitem = np.size(srweight)
    thf = srw[int((1-min(pval,1))*nitem)]
    mnlm = max(1,nlm.mean())
    imin = min(nitem-1,int((1.-pval/mnlm)*nitem))
    
    thcf = srw[imin]
    h,c = np.histogram(sweight,100)
    I = h.sum()*(c[1]-c[0])
    h = h/I
    h0,c0 = np.histogram(srweight,100)
    I0 = h0.sum()*(c0[1]-c0[0])
    h0 = h0/I0
    mp.figure(1)
    mp.plot(c,h)
    mp.plot(c0,h0)
    mp.legend(('true histogram','surrogate histogram'))
    mp.plot([thf,thf],[0,0.8*h0.max()])
    mp.text(thf,0.8*h0.max(),'p<0.2, uncorrected')
    mp.plot([thcf,thcf],[0,0.5*h0.max()])
    mp.text(thcf,0.5*h0.max(),'p<0.05, corrected')
    mp.savefig('/tmp/histo_density.eps')
    mp.show()
Example #23
def item_nbr_tendency_finely(store_nbr, year, month_start=-1, month_end=-1, graph=True):
    '''
    input
        1. store_nbr = store number
        2. year = year
        3. month_start = first month
        4. month_end = last month
        5. graph = whether to plot an item_nbr graph for the above selection

    output
        1. pivot table of item_nbr filtered by store_nbr, year and month
    '''
    store = df_1[(df_1['store_nbr'] == store_nbr) &
                 (df_1['year'] == year)]

    if month_start != -1:
        if month_end == -1:
            month_end = month_start + 1
        store = store[(month_start <= store['month']) & (store['month'] < month_end)]

    pivot = store.pivot_table(index='item_nbr',
                              columns='date',
                              values='units',
                              aggfunc=np.sum)

    zero_index = pivot == 0
    pivot = pivot[pivot != 0].dropna(axis=0, how='all')
    pivot[zero_index] = 0

    if graph:
        plt.figure(figsize=(12, 8))
        sns.heatmap(pivot, cmap="YlGnBu", annot=True, fmt='.0f')
        plt.show()

    return pivot
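
A hypothetical call (assumes the module-level DataFrame df_1 with store_nbr, year, month, date, item_nbr and units columns has already been loaded):

pivot = item_nbr_tendency_finely(2, 2013, month_start=3, month_end=6, graph=True)
print(pivot.shape)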
Example #24
def flipPlot(minExp, maxExp):
    """假定minEXPy和maxExp是正整数且minExp<maxExp
    绘制出2**minExp到2**maxExp次抛硬币的结果
    """
    ratios = []
    diffs = []
    aAxis = []
    for i in range(minExp, maxExp+1):
        aAxis.append(2**i)
    for numFlips in aAxis:
        numHeads = 0
        for n in range(numFlips):
            if random.random() < 0.5:
                numHeads += 1
        numTails = numFlips - numHeads
        ratios.append(numHeads/numFlips)
        diffs.append(abs(numHeads-numTails))
    plt.figure()
    ax1 = plt.subplot(121)
    plt.title("Difference Between Heads and Tails")
    plt.xlabel('Number of Flips')
    plt.ylabel('Abs(#Heads - #Tails)')
    ax1.semilogx(aAxis, diffs, 'bo')
    ax2 = plt.subplot(122)
    plt.title("Heads/Tails Ratios")
    plt.xlabel('Number of Flips')
    plt.ylabel("#Heads/#Tails")
    ax2.semilogx(aAxis, ratios, 'bo')
    plt.show()
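
A minimal driver (assumes the random and matplotlib.pyplot-as-plt imports used by the surrounding examples):

random.seed(0)
flipPlot(4, 16)  # flip counts from 2**4 up to 2**16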
Example #25
    def handle(self, *args, **options):
        try:
            from matplotlib import pylab as pl
            import numpy as np
        except ImportError:
            raise Exception('Be sure to install requirements_scipy.txt before running this.')

        all_names_and_counts = RawCommitteeTransactions.objects.all().values('attest_by_name').annotate(total=Count('attest_by_name')).order_by('-total')
        all_names_and_counts_as_tuple_and_sorted = sorted([(row['attest_by_name'], row['total']) for row in all_names_and_counts], key=lambda row: row[1])
        print "top ten attestors:  (name, number of transactions they attest for)"
        for row in all_names_and_counts_as_tuple_and_sorted[-10:]:
            print row

        n_bins = 100
        filename = 'attestor_participation_distribution.png'

        x_max = all_names_and_counts_as_tuple_and_sorted[-31][1]  # eliminate top outliers from hist
        x_min = all_names_and_counts_as_tuple_and_sorted[0][1]

        counts = [row['total'] for row in all_names_and_counts]
        pl.figure(1, figsize=(18, 6))
        pl.hist(counts, bins=np.arange(x_min, x_max, (float(x_max)-x_min)/100) )
        pl.title('Histogram of Attestor Participation in RawCommitteeTransactions')
        pl.xlabel('Number of transactions a person attested for')
        pl.ylabel('Number of people')
        pl.savefig(filename)
Example #26
    def XXtest5_regrid(self):
        srcF = cdms2.open(sys.prefix + \
                              '/sample_data/so_Omon_ACCESS1-0_historical_r1i1p1_185001-185412_2timesteps.nc')
        so = srcF('so')[0, 0, ...]
        clt = cdms2.open(sys.prefix + '/sample_data/clt.nc')('clt')
        dstData = so.regrid(clt.getGrid(), 
                            regridTool = 'esmf', 
                            regridMethod='conserve')

        if self.pe == 0:
            dstDataMask = (dstData == so.missing_value)
            dstDataFltd = dstData * (1 - dstDataMask)
            zeroValCnt = (dstData == 0).sum()
            if so.missing_value > 0:
                dstDataMin = dstData.min()
                dstDataMax = dstDataFltd.max()
            else:
                dstDataMin = dstDataFltd.min()
                dstDataMax = dstData.max()
                zeroValCnt = (dstData == 0).sum()
            print('Number of zero valued cells', zeroValCnt)
            print('min/max value of dstData: %f %f' % (dstDataMin, dstDataMax))
            self.assertLess(dstDataMax, so.max())
            if False:
                pylab.figure(1)
                pylab.pcolor(so, vmin=20, vmax=40)
                pylab.colorbar()
                pylab.title('so')
                pylab.figure(2)
                pylab.pcolor(dstData, vmin=20, vmax=40)
                pylab.colorbar()
                pylab.title('dstData')
Example #27
    def check_models(self):
        plt.figure('Bandgap narrowing')
        Na = np.logspace(12, 20)
        Nd = 0.
        dn = 1e14
        temp = 300.

        for author in self.available_models():
            BGN = self.update(Na=Na, Nd=Nd, nxc=dn,
                              author=author,
                              temp=temp)

            if not np.all(BGN == 0):
                plt.plot(Na, BGN, label=author)

        test_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'Si', 'check data', 'Bgn.csv')

        data = np.genfromtxt(test_file, delimiter=',', names=True)

        for name in data.dtype.names[1:]:
            plt.plot(
                data['N'], data[name], 'r--',
                label='PV-lighthouse\'s: ' + name)

        plt.semilogx()
        plt.xlabel('Doping (cm$^{-3}$)')
        plt.ylabel('Bandgap narrowing (K)')

        plt.legend(loc=0)
Example #28
def item_nbr_tendency(store_nbr):
    '''
    input : store_nbr
    output : graph representing units grouped by year and month
    '''
    store = df_1[df_1['store_nbr'] == store_nbr]

    pivot = store.pivot_table(index=['year','month'],columns='item_nbr',values='units',aggfunc=np.sum)
    zero_index = pivot==0
    pivot = pivot[pivot!=0].dropna(axis=1,how='all')
    pivot[zero_index]=0
    
    
    pivot_2012 = pivot.loc[2012]
    pivot_2013 = pivot.loc[2013]
    pivot_2014 = pivot.loc[2014]
    
    plt.figure(figsize=(12,8))
    plt.subplot(131)
    sns.heatmap(pivot_2012,cmap="YlGnBu", annot = True, fmt = '.0f')
    plt.subplot(132)
    sns.heatmap(pivot_2013,cmap="YlGnBu", annot = True, fmt = '.0f')
    plt.subplot(133)
    sns.heatmap(pivot_2014,cmap="YlGnBu", annot = True, fmt = '.0f')
    plt.show()
Example #29
def plot_waveforms(time,voltage,APTimes,titlestr):
    """
    plot_waveforms takes four arguments - the recording time array, the voltage
    array, the time of the detected action potentials, and the title of your
    plot.  The function creates a labeled plot showing the waveforms for each
    detected action potential
    """
   
    plt.figure()
   
    ## Your Code Here 
    indices = []
    
    for x in range(len(APTimes)):
        for i in range(len(time)):
            if(time[i]==APTimes[x]):
                indices.append(i)
            

    ## print indices
    Xval = np.linspace(-.003,.003,200)
    print(len(Xval))
    for x in range(len(APTimes)):
        plt.plot(Xval, voltage[indices[x]-100:indices[x]+100])
        plt.title(titlestr)
        plt.xlabel('Time (s)')
        plt.ylabel('Voltage (uV)')
        # plt.hold was removed from matplotlib; repeated plot calls overlay by default

    
    
    plt.show()
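
A synthetic smoke test (my illustration, not from the source). APTimes must be drawn from time itself because the function matches spike times by exact equality:

time = np.linspace(0.0, 1.0, 10000)
voltage = np.random.randn(10000)
APTimes = time[[2500, 7500]]
plot_waveforms(time, voltage, APTimes, 'synthetic spikes')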
Example #30
def fdr(p_values=None, verbose=0):
    """Returns the FDR associated with each p value

    Parameters
    -----------
    p_values : ndarray of shape (n)
        The samples p-value

    Returns
    -------
    q : array of shape(n)
        The corresponding fdr values
    """
    p_values = check_p_values(p_values)
    n_samples = p_values.size
    order = p_values.argsort()
    sp_values = p_values[order]

    # compute q while in ascending order
    q = np.minimum(1, n_samples * sp_values / np.arange(1, n_samples + 1))
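    # sweep from the largest p-value down, capping each q[i-1] by q[i] so q is monotone (step-up)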
    for i in range(n_samples - 1, 0, - 1):
        q[i - 1] = min(q[i], q[i - 1])

    # reorder the results
    inverse_order = np.arange(n_samples)
    inverse_order[order] = np.arange(n_samples)
    q = q[inverse_order]

    if verbose:
        import matplotlib.pylab as mp
        mp.figure()
        mp.xlabel('Input p-value')
        mp.plot(p_values, q, '.')
        mp.ylabel('Associated fdr')
    return q
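
Example use (assuming check_p_values simply validates and returns the p-value array):

p = np.random.uniform(size=1000)
q = fdr(p, verbose=1)
assert q.shape == p.shape and np.all(q <= 1)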
Example #31
    if ffmol is None:
        raise Exception("Could not find any of the FF molecules in the reference structure")
    nbonds = len(ffmol.bonds)
    nangles = len(ffmol.angles)

    # Now we can read data, assume it was created by md_bondang.py
    anglist = []
    bondlist = []
    for filename in args.file:
        bd, ad = _read_series(filename, nbonds)
        # This transforms that numpy arrays into TimeSeries objects and thereby estimate densities etc
        bondlist.append([TimeSeries(bd[:, i]) for i in range(nbonds)])
        anglist.append([TimeSeries(ad[:, i], _trans_ang) for i in range(nangles)])

    # Create one plot for bonds and one for angles
    f = plt.figure(1)
    _plot_dists(bondlist, args.labels, ffmol.bonds, f)
    f.savefig(args.out + "_bonddist.png", format="png",dpi=300)

    f = plt.figure(2)
    _plot_dists(anglist, args.labels, ffmol.angles, f)
    f.savefig(args.out + "_angdist.png", format="png",dpi=300)

    # Estimate ff parameters
    try:
        wb = xl.load_workbook(filename=args.excel)
    except:
        print("Could not open the XLSX file. Will create one from scratch")
        wb = xl.Workbook()

    try:
Example #32
Z = hierarchy.linkage(kojacc, method=method, optimal_ordering=True)
maxdist = np.max(Z[:, 2])

# clustering (segmentation)
clust = hierarchy.fcluster(Z, maxdist*args.cutoff, criterion='distance')
n_clusts = len(np.unique(clust))       # number of clusters

# dendrograms
# colors for clusters
# TODO: Add an option for selecting colormap
dend_colors = cm.jet(np.linspace(0, 1, n_clusts))
hexcolors = [mpl.colors.rgb2hex(rgb[:3]) for rgb in dend_colors]
hierarchy.set_link_color_palette(hexcolors)

# Plot
plt.figure(figsize=(15, 7))
dend = hierarchy.dendrogram(Z, color_threshold=maxdist*args.cutoff,
                            no_labels=True)
plt.axhline(maxdist*args.cutoff, ls='--', alpha=0.3, c='k')
plt.tight_layout()

# legend and colors
if legend:
    legend_elements = []
    for i, rgb in enumerate(dend_colors):
        label = f"Cluster {i+1}"
        element = Line2D([0], [0], color=rgb, label=label, lw=3)
        legend_elements.append(element)
    plt.legend(handles=legend_elements, loc=1)

print(f'[INFO] Output path: {outpath}')
Example #33
    'alpha': 0.2,
    'width': 1,
    'node_shape': 'X',
}
temp = []
for i in list(G):
    try:
        temp.append(float(i[1]))
    except:
        pass
# temp.sort()
h = enumerate(range(0, len(temp)))

print(temp)
x1 = np.array(temp)  # random data, normal distribution
xs = np.linspace(x1.min() - 1, x1.max() + 1, len(x1))
print(x1)

kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
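# note: kde1 (Scott's-rule bandwidth) is computed for comparison, but only the Silverman fit kde2 is plotted below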

fig = plt.figure(figsize=(8, 6))

plt.plot(xs, kde2(xs), 'b-', label="Happiness line")
plt.legend(loc=1)
plt.xlabel("Exercise hours per week")
plt.ylabel("happiness density distribution")
plt.title("Probability density function for exercise versus happiness ")
plt.savefig("m silverman log non ranged estimation.png", dpi=500)
plt.show()
Example #34
            line = lines[i]
            info = re.findall(patternKey, line)
            if len(info):
                actualKey = key.replace(" ", "")
                if info[0][0].replace(" ", "").upper().startswith(actualKey):
                    continue
            else:
                return False
        return True
    except:
        return False


if __name__ == "__main__":
    filename = None
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    print("is JCAMP-DX File?", isJcampFile(filename))
    instance = JcampReader(filename)
    print(instance.info)
    x, y = instance.data
    try:
        import matplotlib.pylab as plt
        plt.figure(0)
        plt.plot(x, y)
        plt.xlabel(instance.info.get('XUNITS', 'X'))
        plt.ylabel(instance.info.get('YUNITS', 'Y'))
        plt.show()
    except:
        pass
Example #35
        welist.append(wecashdic[uid])
    fpr_grd_lm, tpr_grd_lm, _ = roc_curve(uy, ufen)
    fpr_grd_lmw, tpr_grd_lmw, _ = roc_curve(uy, welist, pos_label=0)
    print('call weight', wei)
    print("AUC Score (Train): %f" % metrics.roc_auc_score(uy, ufen))
    # plt.figure(1)
    # plt.plot([0, 1], [0, 1], 'k--')
    # # plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
    # plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT')
    # plt.plot(fpr_grd_lmw, tpr_grd_lmw, label='wecash')
    # plt.xlabel('False positive rate')
    # plt.ylabel('True positive rate')
    # plt.legend(loc='best')
    # plt.show()
    # plt.figure(1)
    # n, bins, patches = plt.hist(welist, 100, normed=1, facecolor='g', alpha=0.75)
    # plt.xlabel('hist')
    # plt.ylabel('Probability')
    # plt.title('Histogram')
    # # plt.text(60,.025, r'$\mu=100,\ \sigma=15$')
    # plt.grid(True)
    # plt.show()

    plt.figure(2)
    n, bins, patches = plt.hist(ufen, 100, density=True, facecolor='g', alpha=0.75)
    plt.xlabel('hist')
    plt.ylabel('Probability')
    plt.title('Histogram')
    #plt.text(60,.025, r'$\mu=100,\ \sigma=15$')
    plt.grid(True)
    plt.show()
Example #36
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
from matplotlib import cm
from scipy.stats import norm,chi2,binom,gamma
import seaborn as sns
from math import cos, sin,log,tan,pi,exp,sqrt,cosh,sinh,tanh,atan,atan2,e
from cmath import exp as cexp, log as clog
from mpl_toolkits.mplot3d import Axes3D

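# one frame per t: the surface phase cos(xx*yy + 6t + pi) and the camera angles both advance with t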
for i,t in enumerate(np.linspace(0,2*pi,360)):
    fig = plt.figure(figsize=(16,16))
    ax = fig.add_subplot(111, projection='3d',facecolor='black')
    p = plt.axis('off')
    x = np.linspace(-2*pi,2*pi,1000)
    y = np.linspace(-2*pi,2*pi,1000)
    xx, yy = np.meshgrid(x, y, sparse=True)
    z = np.cos(xx*yy +6*t+pi)
    ax.view_init(t*180/(pi),t*180/(4*pi))
    ax.set_zlim(-3,3)
    ax.plot_surface(xx, yy, z, antialiased=True, shade=True,cmap=cm.winter,alpha=0.8)
    p = plt.savefig(f'C:/Users/Alejandro/Pictures/RandomPlots/15052020/plot{i}.PNG',facecolor='black')
Example #37
history = simple_model.fit(
    train_data_gen,
    epochs=30,
    steps_per_epoch=5,
    verbose=1,
    validation_data=val_data_gen
)

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
# plt.ylim([0,1.5])
plt.title('Training and Validation Loss')
Example #38
    logis.fit(x_train, y_train)

    xop = []
    yop = []
    xpe = []
    ype = []
    i = 0
    while i <= x_train.shape[0] - 1:
        if y_train[i] == 1:
            xop.append(x_train[i][0])
            yop.append(x_train[i][1])
        else:
            xpe.append(x_train[i][0])
            ype.append(x_train[i][1])
        i += 1

    fig = plot.figure()
    plot.scatter(xop, yop, color="red")
    plot.scatter(xpe, ype, color="blue")
    plot.xlim((-10, 20))
    plot.ylim((-10, 20))

    X = np.linspace(-10, 10, 30)
    Y = -X * logis.theta[0][1] / logis.theta[0][2] - logis.theta[0][
        0] / logis.theta[0][2]
    plot.plot(X, Y)

    plot.show()
    fig.savefig('lr.jpg')
Example #39
'''Since the ARMA model was fitted to preprocessed data, its predictions
must be restored through the corresponding inverse transforms.'''

# undo the first-order differencing
diff_shift_ts = ts_diff_1.shift(1)
diff_recover_1 = predict_ts.add(diff_shift_ts)

# undo the second first-order differencing
rol_shift_ts = rol_mean.shift(1)
diff_recover = diff_recover_1.add(rol_shift_ts)

# undo the rolling mean
rol_sum = ts_log.rolling(window=11).sum()
rol_recover = diff_recover * 12 - rol_sum.shift(1)

# undo the log transform
log_recover = np.exp(rol_recover)
log_recover.dropna(inplace=True)

# use root-mean-square error (RMSE) to evaluate the in-sample fit
ts = ts[log_recover.index]  # keep only the records that have predictions
plt.figure(facecolor='white')
log_recover.plot(color='blue', label='Predict')
ts.plot(color='red', label='Original')
plt.legend(loc='best')
plt.title('RMSE: %.4f' % np.sqrt(sum((log_recover - ts)**2) / ts.size))
plt.show()



Example #40
    turb_predictions.append(yhat)
    obs = test[t]
    turb_history.append(obs)
    diff = obs - yhat
    turb_diff.append(diff)
    print(
        'TurbParameter Index= %d, predicted=%f, expected=%f, difference = %f' %
        (k, yhat, obs, diff))
    k = k + 1
    if (k == 3000):
        break
#test1, test2 = tts(test,test_size = 337, random_state=0, shuffle=False)
error = mean_squared_error(test1, predictions)
print('Test MSE: %.3f' % error)
# plot
plt.figure(figsize=(12, 8))
pyplot.plot(test1, label="Original")
pyplot.plot(predictions, color='red', label='Predicted')
plt.xlabel("Datatime Index ")
plt.ylabel("Turbidity Values")
plt.title('Turbidity Forecast')
plt.legend()
plt.show()
#%%
# SARIMA example
from statsmodels.tsa.statespace.sarimax import SARIMAX
from random import random

# fit model
model = SARIMAX(turb_test, order=(1, 1, 1), seasonal_order=(1, 1, 1, 1))
model_fit = model.fit(disp=False)
Example #41
# Gaussian distributions
a = gauss(n, m=20, s=5)  # m= mean, s= std

B = np.zeros((n, n_target))

for i, m in enumerate(lst_m):
    B[:, i] = gauss(n, m=m, s=5)

# loss matrix and normalization
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'euclidean')
M /= M.max()
M2 = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'sqeuclidean')
M2 /= M2.max()
#%% plot the distributions

pl.figure(1)
pl.subplot(2, 1, 1)
pl.plot(x, a, 'b', label='Source distribution')
pl.title('Source distribution')
pl.subplot(2, 1, 2)
pl.plot(x, B, label='Target distributions')
pl.title('Target distributions')
pl.tight_layout()

#%% Compute and plot distributions and loss matrix

d_emd = ot.emd2(a, B, M)  # direct computation of EMD
d_emd2 = ot.emd2(a, B, M2)  # direct computation of EMD with loss M3

pl.figure(2)
pl.plot(d_emd, label='Euclidean EMD')
Example #42
def draw_trend(timeSeries, size=12):
	'''rolling-mean plot'''
	f = plt.figure(facecolor='white')
Example #43
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
from math import cos, sin, log, tan, gamma, pi, exp, sqrt
m = 0
for y in np.linspace(0, 18, 390):
    p = plt.figure(figsize=(12, 12), facecolor='black')
    p = plt.axis('off')
    p = plt.xlim(-2.1, 2.1)
    p = plt.ylim(-2.1, 2.1)
    plt.plot([
        cos(t) * (1 - cos(t * y) * sin(y * t))
        for t in np.linspace(0, 2 * pi, 500)
    ], [
        sin(t) * (1 - cos(t * y) * sin(y * t))
        for t in np.linspace(0, 2 * pi, 500)
    ],
             lw=7,
             color=plt.cm.PiYG(y / 18))
    plt.savefig(
        f'C:/Users/Alejandro/Pictures/RandomPlots/19122019/plot{m}.png',
        facecolor='black')
    m += 1
Example #44

r = 1.
a = 0.1
z = 1.5
e = 0.75

t = sc.linspace(0, 15, 1000)

R0 = 10
C0 = 5
RC0 = sc.array([R0, C0])

pops, infodict = integrate.odeint(dCR_dt, RC0, t, full_output=True)

f1 = p.figure()
p.plot(t, pops[:, 0], 'g-', label='Resource density')  # Plot
p.plot(t, pops[:, 1], 'b-', label='Consumer density')
p.grid()
p.legend(loc='best')
p.xlabel('Time')
p.ylabel('Population density')
p.title('Consumer-Resource population dynamics')
f1.savefig('../Results/LV1_model.pdf')

# Phase Plot script

f2 = p.figure()
p.plot(pops[:, 0], pops[:, 1], 'r-')  # Plot
p.grid()
p.xlabel('Resource density')
Example #45
d = {
    'training accuracy': '/*dbg_acc_train*.npy',
    'accuracy val': '/*dbg_acc_val_*.npy',
    'accuracy other ds': '/*dbg_acc_other_ds_*.npy',
    # 'loss ae': '/*dbg_ae_cost_*.npy',
    # 'loss lr': '/*dbg_lr_cost_*.npy',
    # 'loss combined': '/*dbg_combined_cost_*.npy'
}

path_main = 'nips3mm_scarcity'

for k, v in d.items():
    pkgs = glob.glob(RES_NAME + v)
    for n_comp in n_comps:
        plt.figure()
        for p in pkgs:
            lambda_param = float(re.search('lambda=(.{4})', p).group(1))
            # n_hidden = int(re.search('comp=(?P<comp>.{1,2,3})_', p).group('comp'))
            n_hidden = int(re.search('comp=(.{1,3})_', p).group(1))
            max_samples = int(re.search('max(.{1,4})_', p).group(1))
            rest_samples = int(re.search('_r(.{2,4})_', p).group(1))
            task_samples = int(re.search('_t(.{2,4})dbg', p).group(1))
            if n_comp != n_hidden:
                continue

            if (task_samples == 1000):
                continue

            cur_values = np.load(p)
Example #46
qpoases_stats = pd.read_csv('qpoases_stats.csv')

# Get mean number of iterations and runtime for each number of parameters
osqp_mean = osqp_stats.groupby('n').mean()
gurobi_mean = gurobi_stats.groupby('n').mean()
qpoases_mean = qpoases_stats.groupby('n').mean()

# Save mean stats in a CSV file
osqp_mean.columns = ['osqp_iter', 'osqp_runtime']
gurobi_mean.columns = ['gurobi_iter', 'gurobi_runtime']
qpoases_mean.columns = ['qpoases_iter', 'qpoases_runtime']
mean_stats = pd.concat([osqp_mean, gurobi_mean, qpoases_mean], axis=1)
mean_stats.to_csv('lasso_mean_stats.csv')

# Plot mean runtime
plt.figure()
ax = plt.gca()
plt.semilogy(mean_stats.index.values,
             mean_stats['osqp_runtime'].values,
             color='C0',
             label='OSQP')
plt.semilogy(mean_stats.index.values,
             mean_stats['qpoases_runtime'].values,
             color='C1',
             label='qpOASES')
plt.semilogy(mean_stats.index.values,
             mean_stats['gurobi_runtime'].values,
             color='C5',
             label='GUROBI')
plt.legend()
plt.grid()
Example #47
rows = []
x_axis, y_axis = None, None  # filled from the first record, then checked for consistency
file_to_open = sys.argv[1] if len(sys.argv) > 1 else "attention_data.jsonl"
with open(file_to_open) as fr:
    lines = fr.readlines()
for epoch_index, l in enumerate(lines, 1):
    data = json.loads(l.strip())
    x = data["source"]
    y = data["target"]
    if x_axis:
        assert x == x_axis
        assert y == y_axis
    x_axis = x
    y_axis = y
    row = np.array(data['attention'])
    x = x[0]
    y = y[0]
    sub_title = " ".join(x.split()[1:-1]) + " ->\n" + " ".join(y.split()[1:-1])
    plt.figure(epoch_index)
    full_title = "attention based alignment epoch " + str(
        epoch_index) + ":\n" + sub_title
    # ax.set_title(full_title)
    plt.title(full_title, fontsize=8)
    ax = sns.heatmap(row,
                     xticklabels=x.split(),
                     yticklabels=y.split()[1:],
                     cmap="Blues",
                     annot=True,
                     fmt='.2f')
    # ax = sns.heatmap(row, linewidth=0.5,yticklabels=y)
    # ax.set_yticks(y[0].split()[1:])
    plt.savefig("heat_map_epoch_" + str(epoch_index) + ".png")
Example #48
recgen_u_col.params['recordings']['noise_color'] = True
recgen_u_col.params['recordings']['seed'] = 1
recgen_u_col.generate_recordings()

recgen_dc_col = deepcopy(recgen_u)
recgen_dc_col.params['recordings']['noise_mode'] = 'distance-correlated'
recgen_dc_col.params['recordings']['noise_color'] = True
recgen_dc_col.params['recordings']['seed'] = 2
recgen_dc_col.generate_recordings()

recgen_far = deepcopy(recgen_u)
recgen_far.params['recordings']['noise_mode'] = 'far-neurons'
recgen_far.params['recordings']['seed'] = 3
recgen_far.generate_recordings()

fig1 = plt.figure(figsize=(9, 9))

gs = gridspec.GridSpec(16, 19)

ax_u = fig1.add_subplot(gs[:4, :3])
ax_dc = fig1.add_subplot(gs[:4, 4:7])
ax_ucol = fig1.add_subplot(gs[:4, 8:11])
ax_dccol = fig1.add_subplot(gs[:4, 12:15])
ax_far = fig1.add_subplot(gs[:4, 16:19])

max_rec = 0
for rec in [recgen_u, recgen_dc, recgen_dc_col, recgen_u_col, recgen_far]:
    max_v = np.max(rec.recordings)
    if max_v > max_rec:
        max_rec = max_v
Example #49
    rho = np.zeros((Nx, Ny))
    rho[20:Nx-20, Ny//2:Ny//2+20] = 0.7
    rho[Nx//2:Nx//2+5, 20:Ny-20] = 1
    # plt.imshow(rho)
    # plt.show()

    design_region = np.ones((Nx, Ny))

    W = get_W(Nx, Ny, design_region, NPML, R=R)

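    # density pipeline: raw rho -> filtered rhot -> projected rhob -> permittivity eps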
    rhot = rho2rhot(rho, W)
    rhob = rhot2rhob(rhot, eta=eta, beta=beta)
    eps = rhob2eps(rhob, eps_m=eps_m)

    f = plt.figure(figsize=(8, 4))
    RTs = np.linspace(0,1,1000)
    plt.plot(RTs, rhot2rhob(RTs, eta=0.5, beta=0.1))
    plt.plot(RTs, rhot2rhob(RTs, eta=0.5, beta=10))
    plt.plot(RTs, rhot2rhob(RTs, eta=0.5, beta=1000))
    plt.legend((r"$\beta = .1$", r"$\beta = 10$", r"$\beta = 1000$"))
    plt.xlabel(r"$\tilde{\rho}_i$")
    plt.ylabel(r"$\bar{\rho}_i$")
    plt.title(r"projection with varying $\beta$ ($\eta=0.5$)")
    # plt.savefig('data/figs/img/project.pdf', dpi=300)
    plt.show()

    # # change in projected density with respect to filtered density


    def circle(Nx, Ny, R=10):
Example #50
    BINS = np.linspace(0, 1.0, BIN_N + 1)
    ROW_N = len(data)

    hist_view = np.zeros((ROW_N, BIN_N))
    for row_i, row in enumerate(data):

        x, _ = np.histogram(row, bins=BINS)
        hist_view[row_i] = x
    # sort hist by original permutation
    hist_view = hist_view[r['order_permutation']]
    truths = truth[r['order_permutation']]

    a = assignments[-1]

    ai = np.argsort(a).flatten()
    f = pylab.figure(figsize=(4, 12))
    ax = f.add_subplot(1, 3, 1)
    ax.imshow(hist_view, interpolation='nearest')
    ax = f.add_subplot(1, 3, 2)

    ax.imshow(hist_view[ai], interpolation='nearest')
    for i in np.argwhere(np.diff(a[ai]) > 0).flatten():
        ax.axhline(i + 0.5, c='w')

    ax = f.add_subplot(1, 3, 3)
    ax.imshow(hist_view[np.argsort(truths).flatten()], interpolation='nearest')
    for i in np.argwhere(np.diff(np.sort(truths)) > 0).flatten():
        ax.axhline(i + 0.5, c='w')

    f.savefig(clusters_plot)
Example #51
    def plot_mass_function(self, units='Msol h**-1', kern='ST', ax=None,\
                     plot_Tvir=False, label_z=False, nbins=100, label=None, show=False, **kwargs):
        '''
        Plot the Halo mass function and (optionally) Tvir on twinx axis

        Params:
            kern: The analytical kernel to use
            plot_Tvir: Calculates the Virial temperature in Kelvin for a halo of a given mass using equation 26 of Barkana & Loeb.
        '''
        import matplotlib.pylab as plt
        from seren3.analysis.plots import fit_scatter

        snapshot = self.base
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)

        if label_z:
            label = "%s z=%1.3f" % (label, self.base.z)

        c = kwargs.pop("color", "b")

        mbinmps, y, mbinsize = self.mass_function(units=units,
                                                  nbins=nbins,
                                                  **kwargs)
        # ax.semilogy(mbinmps, y, 'o', label=label, color=c)

        bin_centers, mean, std = fit_scatter(mbinmps, y)
        # ax.errorbar(bin_centers, mean, yerr=std, color=c)
        e = ax.errorbar(bin_centers, mean, yerr=std, color=c,\
            fmt="o", markerfacecolor=c, mec='k', capsize=2, capthick=2, elinewidth=2, linestyle="-", linewidth=2., label=label)
        if plot_Tvir:
            import cosmolopy.perturbation as cp
            cosmo = snapshot.cosmo
            M_ticks = np.array(ax.get_xticks())

            mass = self.base.array(10**M_ticks, units)
            Tvir = [
                float("%1.3f" % np.log10(v))
                for v in cp.virial_temp(mass, **cosmo)
            ]

            ax2 = ax.twiny()
            ax2.set_xticks(M_ticks - M_ticks.min())
            ax2.set_xticklabels(Tvir)
            ax2.set_xlabel(r"log$_{10}$(T$_{\mathrm{vir}}$ [K])")

        if kern is not None:
            import pynbody

            # We only need to load one CPU to setup the params dict
            s = pynbody.snapshot.ramses.RamsesSnap(
                "%s/output_%05d" % (snapshot.path, snapshot.ioutput), cpus=[1])
            M_kern, sigma_kern, N_kern = pynbody.analysis.halo_mass_function(
                s, kern=kern, log_M_min=mbinmps.min(), log_M_max=mbinmps.max())

            # Convert to correct units
            M_kern.convert_units(mbinmps.units)
            N_kern.convert_units(y.units)

            # ax.semilogy(np.log10(M_kern*(snapshot.info['H0']/100)), N_kern, label=kern)
            ax.semilogy(np.log10(M_kern),
                        N_kern,
                        label=kern,
                        color="grey",
                        linestyle="--")

        ax.set_xlabel(r'log$_{10}$(M [$%s$])' % mbinmps.units.latex())
        ax.set_ylabel('dN / dlog$_{10}$(M [$%s$])' % y.units.latex())

        if "title" in kwargs:
            ax.set_title(kwargs.get("title"))

        if show:
            ax.legend()
            plt.show()
Example #52
def get_data():
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(18, GPIO.OUT)
    GPIO.output(18, False)
    
    try:
        ser = serial.Serial('/dev/ttyUSB0', 9600)
    except:
        print('could not find arduino')
    try:
        my_file = open("flowers.txt", 'a')
        count = 0
        while count < 25:
            line = ser.readline()
            my_file.write(line)
            print(line)
            count = count + 1
        my_file.close()
    except:
        print("couldn't write data to file")
    
    
    ifile = open('flowers.txt', 'r')
    temp = []
    water = []
    humidity = []

    for line in ifile.readlines():
        line = line.strip()
        print(line)
        if line[:5] == 'Water':
            try:
                water.append(float(line.split(':')[-1]))
            except:
                print('serial error')
        elif line[:5] == 'Curre':
            try:
                humidity.append(float(line.split('=')[-1][:-1]))
            except:
                print('serial error')
        elif line[:5] == 'tempe':
            try:
                temp.append(float(line.split('=')[-1][:-1]))
            except:
                print('serial error')

    print(temp)
    print(water)
    print(humidity)

    pylab.figure(1)
    fig, ax1 = plt.subplots()
    pylab.plot(range(len(temp)), temp, 'ro-')
    pylab.xlabel('Time')
    pylab.ylabel('Temp')
    pylab.ylim(0, 100)
    ax2 = ax1.twinx()
    ax2.plot(range(len(humidity)), humidity, 'go-')
    ax2.set_ylabel('Humidity')
    ax2.set_ylim(0, 100)
    pylab.savefig('temperature.png')
    pylab.close()
    
    pylab.figure(2)
    pylab.plot(range(len(water)), water, 'bo-')
    pylab.xlabel('Time')
    pylab.ylabel('Water')
    pylab.ylim(0, 1024)
    pylab.savefig('water.png')
    
    ifile.close()
    
    return template("plot")
Example #53
# lists
categories = [
    'house', 'visage', 'animal', 'scene', 'tool', 'pseudoword', 'characters',
    'scrambled'
]
scores_spc = np.load('%s/../scores_sid_pid_cat.npy' % INDIR, allow_pickle=True).item()  # object array needs allow_pickle on numpy >= 1.16.3

# aggregate how many probes are predictive of how many categories
counts = np.zeros(9, dtype=np.int32)
for sid in scores_spc.keys():
    for pid in scores_spc[sid].keys():
        counts[len(scores_spc[sid][pid])] += 1

# plot the histogram
fig = plt.figure(figsize=(4, 8), dpi=300)

plt.barh(range(8), counts[1:], color="green")
plt.xlabel('Number of probes', size=16)
plt.ylabel('Number of categories a probe predicts', size=16)
plt.yticks(range(8), np.arange(1, 9), size=14)
plt.xticks([0, 20, 40, 60, 80, 100, 150, 200, 250, 300, 350, 400],
           [0, 20, 40, 60, 80, 100, 150, 200, 250, 300, 350, 400],
           size=12,
           rotation=90)
for i, v in enumerate(counts[1:]):
    if i == 0:
        plt.gca().text(v - 65,
                       i + 0.1,
                       str(counts[1:][i]),
                       color='black')
Example #54
0
import numpy as np
import matplotlib.pyplot as plt
# assumed source of the aperture-photometry helpers used below
from photutils import CircularAperture, CircularAnnulus, aperture_photometry
def fluxExtract(science,bias,dark,flats,hdr,plotap=False,rad=18,plotname=None):
    ' Returns final flux of source '
    # Calibrate Image. Flatten? add bias and flat to input
    flagg = 0
    
    # Default shape of SBIG 2x2 binning
    xsize, ysize = 1266, 1676
    if xsize != np.shape(bias)[0]:
        print('WARNING: check that size of science file matches assumptions')

    if flats == 0:
        flat = np.ones((xsize,ysize))
    else:
        flat = flats[hdr['FILTER']]
    
    if dark == 0:
        # no dark frame supplied: subtract nothing (zeros, not ones)
        dark = np.zeros((xsize, ysize))
    else:
        dark = dark[str(hdr['EXPTIME'])]

    if np.mean(bias) == 0:
        bias = np.zeros((xsize, ysize))

    # Size of science to trim calib frames (which are always full)
    # Science frames either full or sub: 400x400 around center ()
    # use config file in future; x and y appear swapped between TheSky and pyfits
    centx, centy = int(1663 / 2), int(1252 / 2)
    subframe_size = np.shape(science)
    dx, dy = subframe_size[1] // 2, subframe_size[0] // 2  # cols -> x, rows -> y

    l, b, r, t = centx - dx, centy + dy, centx + dx, centy - dy  # top and bottom switched

    data = (science - bias[t:b,l:r] - dark[t:b,l:r])/flat[t:b,l:r]
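    # data is now the calibrated frame: (science - bias - dark) / flat,
    # with each calibration frame trimmed to the science subframe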
    
    if np.mean(data) < 0:
        flagg += 1
        
    # Get source x,y position
    x,y = sourceFinder(science)
    positions = (x,y)
    
    if x==0:
        flagg += 1
    
    if y==0:
        flagg += 1
        
    # Define Apertures
    apertures = CircularAperture(positions, r=rad)
    annulus_apertures = CircularAnnulus(positions, r_in=rad+5, r_out=rad+20)

    # Get fluxes
    rawflux_table = aperture_photometry(data, apertures)
    bkgflux_table = aperture_photometry(data, annulus_apertures)
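    # background: mean per-pixel level in the annulus, scaled by the
    # aperture area, then subtracted from the raw aperture sum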
    bkg_mean = bkgflux_table['aperture_sum'] / annulus_apertures.area()
    bkg_sum = bkg_mean * apertures.area()
    final_sum = rawflux_table['aperture_sum'] - bkg_sum

    # Plot
    if plotap:
        plt.ioff()
        plt.figure(-999)
        plt.clf()
        plt.imshow(np.log(data), origin='lower')
        apertures.plot(color='red', lw=1.5, alpha=0.5)
        annulus_apertures.plot(color='orange', lw=1.5, alpha=0.5)
        plt.savefig(plotname)
        plt.close()

    return final_sum[0],flagg,x,y,bkg_mean[0]
Example #55
0
import h5py
import numpy as np
import matplotlib.pylab as pl
import matplotlib.pyplot as plt
from matplotlib import gridspec


def add_plot(pl, d, r):  # signature inferred from the calls below
    ax = pl.subplot(gs[r, 1])  # row r, right-hand column
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    pl.imshow(np.fliplr(d))


dataset = h5py.File(
    '/hpf/largeprojects/ccm/devin/plastics-data/general/create_2d_array/2D_data_color_1_ds8_uni_split.h5',
    'r')
data = dataset.get('train_data_im')[:5]

d = data
d2 = np.fliplr(d)

print(d.shape)
print(d2.shape)

# create a 5x2 grid of sub plots (only the right-hand column is filled)
gs = gridspec.GridSpec(5, 2)

pl.figure()
add_plot(pl, data[0], 0)
add_plot(pl, data[1], 1)
add_plot(pl, data[2], 2)
add_plot(pl, data[3], 3)
add_plot(pl, data[4], 4)

#plt.show()
plt.savefig("2d_demo_flip.png")
Example #56
0
allcontactpairs = {}
for h in histo1.keys():
    allcontactpairs[h] = True
for h in histo2.keys():
    if not h in allcontactpairs:
        allcontactpairs[h] = True
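# allcontactpairs now holds the union of contacts from both structures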

print "num. contacts in histogram:", len(histo1), len(histo2)

datafile = open(
    mergeStrings(inpdb1, inpdb2).replace("/", "_") + "_contactDiff.csv", 'w')

#print "pos1,pos2,co,p1,p2,dp,nat1?,nat2?"
datafile.write("pos1,pos2,co,p1,p2,dp,nat1?,nat2?\n")

rows = []

p.figure(dpi=100)
for c in allcontactpairs:
    p1 = histo1.get(c, 0.0)
    p2 = histo2.get(c, 0.0)

    res1 = c[0]
    res2 = c[1]
    co = abs(c[0] - c[1])
    dp = p2 - p1
    maxp = max(p1, p2)
Example #57
0
''' Plot Roll noise along track (no location coordinate) '''
import matplotlib.pylab as plt
import numpy
import matplotlib as mpl
import swotsimulator.rw_data as rw_data
import params as p

filegrid = p.indatadir + 'OREGON_grd.nc'
fileswot = p.outdatadir + 'OREGON_swot292_c01_p024.nc'
vmin = -0.10
vmax = 0.10
fig = plt.figure(figsize=(30, 10))
tloc = 0.11
tfont = 24
data = rw_data.Sat_SWOT(file=fileswot)
data.load_swath(phase_err=[], x_ac=[], x_al=[])
x_al, x_ac = numpy.meshgrid(data.x_al, data.x_ac)
x_al = x_al - numpy.min(numpy.min(x_al))
SSH = data.phase_err
stitle = 'Phase error along swath'
nac = numpy.shape(data.lon)[1]
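# mask fill values (zeros and |SSH| > 1000) so they are not drawn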
SSH[abs(SSH) > 1000] = numpy.nan
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
SSH[SSH == 0] = numpy.nan
SSH = numpy.ma.array(SSH, mask=numpy.isnan(SSH))
print(numpy.shape(x_al), numpy.shape(x_ac), numpy.shape(SSH))
col = plt.pcolormesh(x_al[int(nac / 2), :],
                     x_ac[:int(nac / 2), 0],
                     numpy.transpose(SSH[:, :int(nac / 2)]),
                     norm=norm)
Example #58
0
##popt, pcov = curve_fit(func, size[0:3], energy[0:3])
popt, pcov = curve_fit(func, size, energy, maxfev=10000)
pre      = popt[0]
exponent = popt[1]
const    = popt[2]
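# func is defined earlier (not shown); the parameter names suggest a
# power-law model of the form  E(x) = pre * x**exponent + const  (assumption)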

x = np.arange(0.5 * np.min(size), 2.0 * np.max(size), 0.05)
yvals=func(x, pre, exponent, const)
#print('cell size equal 80', func(80, pre, exponent, const) )
#print('The denominator  = ', pre*exponent )
#print('formation energy = ', func(2000, pre, exponent, const) )
#print('formation energy = ', const )

plt.figure(figsize=(18.5, 10.5))
ax = plt.subplot(1, 1, 1)
#--------------------------------------------------------------------
ax.tick_params(axis='x', pad=15)  # distance between axis and text
ax.tick_params(axis='y', pad=15)
#--------------------------------------------------------------------
ax.spines['left'].set_linewidth(3.0)
ax.spines['right'].set_linewidth(3.0)
ax.spines['top'].set_linewidth(3.0)
ax.spines['bottom'].set_linewidth(3.0)

#===================================================================================
plt.minorticks_on()
plt.tick_params(which='major', direction='in', width=3.0, length=12)  #
plt.tick_params(which='minor', direction='in', width=3.0, length=5)
plt.xticks(fontsize=36)
Example #59
0
result_std.to_csv('result_std.csv',encoding='utf-8', index=False)   
result_var.to_csv('result_var.csv',encoding='utf-8', index=False) 
  

# plotting the results
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns


Result = pd.read_csv('RESULT.csv')
Result = Result.iloc[:,1:]
#Result= pd.read_csv('result_mean.csv' ) 

plt.figure(1)
sns.set(rc={'figure.figsize':(7,5)})
sns.set_style("whitegrid")
ylen = 0.5  # vertical offset so labels sit just above each marker
#sns.set_style("darkgrid", {"axes.facecolor": ".9"})
sns.regplot(data=Result, x="SMAPE1", y="MF1", fit_reg=False, marker="+", color="skyblue")
p1=sns.regplot(data=Result, x="SMAPE1", y="MF1", fit_reg=False, marker="o", color="skyblue", scatter_kws={'s':200}) 
for i in range(5):
    p1.text(Result.iloc[i,1], Result.iloc[i,7]+ylen, Result.iloc[i,0], horizontalalignment='center', size='larger', color='black', weight='semibold')

sns.regplot(data=Result, x="SMAPE2", y="MF2", fit_reg=False, marker="+", color="red")
p1=sns.regplot(data=Result, x="SMAPE2", y="MF2", fit_reg=False, marker="o", color="red", scatter_kws={'s':200}) 
for i in range(5):
    p1.text(Result.iloc[i,2], Result.iloc[i,8]+ylen, Result.iloc[i,0], horizontalalignment='center', size='larger', color='black', weight='semibold')

sns.regplot(data=Result, x="SMAPE(%)", y="MF(%)", fit_reg=False, marker="+", color="yellow")
Example #60
0
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
import seaborn as sns
from math import cos, sin, log, tan, gamma, pi, exp, sqrt

plt.figure(figsize=(14, 14), facecolor='black', dpi=400)
plt.axis('off')
for z in range(200):
    rango = range(z * 10, (z + 1) * 10)
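    # note: the comprehension variable z shadows the outer loop's z,
    # so each scatter call draws 10 points of the parametric curve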
    plt.scatter([z * sin(z) * cos(z) * cos(z) for z in rango],
                [cos(z) * sin(z) * z**2 for z in rango],
                lw=4.5,
                alpha=0.7,
                color=plt.cm.Wistia(np.random.uniform(0, 1)))
plt.savefig('C:/Users/Alejandro/Pictures/RandomPlots/27012020.png',
            facecolor='black')