Example no. 1
    def plotFittingResults(self):
        """
        Plot results of Rmax optimization procedure and best fit of the experimental data
        """
        _listFitQ = [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitQ()]
        _listFitValues = [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitValues()]
        _listExpQ = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataQ()]
        _listExpValues = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataValues()]

        #_listExpStdDev = None
        #if self.getDataInput().getExperimentalDataStdDev():
        #    _listExpStdDev = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataStdDev()]
        #if _listExpStdDev:
        #    pylab.errorbar(_listExpQ, _listExpValues, yerr=_listExpStdDev, linestyle='None', marker='o', markersize=1,  label="Experimental Data")
        #    pylab.gca().set_yscale("log", nonposy='clip')
        #else:         
        #    pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5,  label="Experimental Data")

        pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5, label="Experimental Data")
        pylab.semilogy(_listFitQ, _listFitValues, label="Fitting curve")
        pylab.xlabel('q')
        pylab.ylabel('I(q)')
        pylab.suptitle("RMax : %3.2f. Fit quality : %1.3f" % (self.getDataInput().getRMax().getValue(), self.getDataOutput().getFitQuality().getValue()))
        pylab.legend()
        pylab.savefig(os.path.join(self.getWorkingDirectory(), "gnomFittingResults.png"))
        pylab.clf()
Example no. 2
def disp_results(fig, ax1, ax2, loss_iterations, losses, accuracy_iterations, accuracies, accuracies_iteration_checkpoints_ind, fileName, color_ind=0):
    # 'axes.color_cycle' was removed in matplotlib 2.0; read the colours from the prop cycle instead
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    modula = len(colors)
    acrIterations = []
    top_acrs = {}
    if accuracies.size:
        if accuracies.size > 4:
            top_n = 4
        else:
            top_n = accuracies.size - 1
        temp = np.argpartition(-accuracies, top_n)
        result_indices = temp[:top_n]
        temp = np.partition(-accuracies, top_n)
        result = -temp[:top_n]
        for acr in result_indices:
            acrIterations.append(accuracy_iterations[acr])
            top_acrs[str(accuracy_iterations[acr])] = str(accuracies[acr])

        sorted_top4 = sorted(top_acrs.items(), key=operator.itemgetter(1))
        maxAcc = np.amax(accuracies, axis=0)
        iterIndx = np.argmax(accuracies)
        maxAccIter = accuracy_iterations[iterIndx]
        maxIter = accuracy_iterations[-1]
        consoleInfo = '\n[%s]: maximum accuracy [from 0 to %s] = [Iteration %s]: %s' % (fileName, maxIter, maxAccIter, maxAcc)
        plotTitle = 'max accuracy (%s) [Iteration %s]: %s' % (fileName, maxAccIter, maxAcc)
        print(consoleInfo)
        #print(str(result))
        #print(acrIterations)
        #print('Top 4 accuracies:')
        print('Top 4 accuracies:' + str(sorted_top4))
        plt.title(plotTitle)
    ax1.plot(loss_iterations, losses, color=colors[(color_ind * 2 + 0) % modula])
    ax2.plot(accuracy_iterations, accuracies, color=colors[(color_ind * 2 + 1) % modula], label=str(fileName))
    ax2.plot(accuracy_iterations[accuracies_iteration_checkpoints_ind], accuracies[accuracies_iteration_checkpoints_ind], 'o', color=colors[(color_ind * 2 + 1) % modula])
    plt.legend(loc='lower right')
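The top-N selection above leans on np.argpartition, which is easy to misread; a tiny self-contained check of the idiom (values here are illustrative):

import numpy as np

accuracies = np.array([0.71, 0.93, 0.88, 0.95, 0.80])
top_n = 3
idx = np.argpartition(-accuracies, top_n)[:top_n]  # indices of the 3 largest, unordered
print(accuracies[idx])  # the values 0.95, 0.93, 0.88 in some order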
Example no. 3
def plot_many_corr_delta_vel():
    pl.clf()
    leg = []
    for kmax in [0.05, 0.1, 0.2, 0.5, 1., 2., 5.]:
        plot_corr_delta_vel(kmin=1e-3, kmax=kmax, doclf=False)
        leg.append('kmax=%0.2f'%kmax)
    pl.legend(leg)
Example no. 4
    def check_models(self):
        temp = np.logspace(0, np.log10(600))
        num = len(self.available_models())

        fig, ax = plt.subplots(1)
        self.plotting_colours(num, fig, ax, repeats=2)

        for author in self.available_models():
            Nc, Nv = self.update(temp=temp, author=author)
            # print Nc.shape, Nv.shape, temp.shape
            ax.plot(temp, Nc, '--')
            ax.plot(temp, Nv, '.', label=author)

        ax.loglog()
        leg1 = ax.legend(loc=0, title='colour legend')

        Nc, = ax.plot(np.inf, np.inf, 'k--', label='Nc')
        Nv, = ax.plot(np.inf, np.inf, 'k.', label='Nv')

        plt.legend([Nc, Nv], ['Nc', 'Nv'], loc=4, title='Line legend')
        plt.gca().add_artist(leg1)

        ax.set_xlabel('Temperature (K)')
        ax.set_ylabel('Density of states (cm$^{-3}$)')
        plt.show()
Example no. 5
def make_corr1d_fig(dosave=False):
    corr = make_corr_both_hemi()
    lw=2; fs=16
    pl.figure(1)#, figsize=(8, 7))
    pl.clf()
    pl.xlim(4,300)
    pl.ylim(-400,+500)    
    lambda_titles = [r'$20 < \lambda < 30$',
                     r'$30 < \lambda < 40$',
                     r'$\lambda > 40$']
    colors = ['blue','green','red']
    for i in range(3):
        corr1d, rcen = corr_1d_from_2d(corr[i])
        # ipdb.set_trace()  # debugger breakpoint left in; disabled so the loop runs through
        pl.semilogx(rcen, corr1d*rcen**2, lw=lw, color=colors[i])
        #pl.semilogx(rcen, corr1d*rcen**2, 'o', lw=lw, color=colors[i])
    pl.xlabel(r'$s (Mpc)$',fontsize=fs)
    pl.ylabel(r'$s^2 \xi_0(s)$', fontsize=fs)    
    pl.legend(lambda_titles, loc='lower left', fontsize=fs+3)
    pl.plot([.1,10000],[0,0],'k--')
    s_bao = 149.28
    pl.plot([s_bao, s_bao],[-9e9,+9e9],'k--')
    pl.text(s_bao*1.03, 420, 'BAO scale')
    pl.text(s_bao*1.03, 370, '%0.1f Mpc'%s_bao)
    if dosave: pl.savefig('xi1d_3bin.pdf')
Example no. 6
    def ROC(self, x):
        """
        ROC curve for separating the positive Gamma distribution
        from the two other modes, predicted by current parameter values
        -x: vector of observations

        Output: P
        P[0]: False positive rates
        P[1]: True positive rates
        """
        import matplotlib.pylab as mp
        p = len(x)
        P = np.zeros((2, p))
        # False positives
        P[0] = (self.mixt[0]*st.gamma.cdf(-x, self.shape_n, scale=self.scale_n)
                + self.mixt[1]*st.norm.sf(x, 0, np.sqrt(self.var)))/\
               (self.mixt[0] + self.mixt[1])
        # True positives
        P[1] = st.gamma.sf(x, self.shape_p, scale=self.scale_p)
        mp.figure()
        I = P[0].argsort()
        mp.plot(P[0, I], P[0, I], 'r-')
        mp.plot(P[0, I], P[1, I], 'g-')
        mp.legend(('False positive rate', 'True positive rate'))
        return P
Example no. 7
def study_multiband_planck(quick=True):
    savename = datadir+'cl_multiband.pkl'
    bands = [100, 143, 217, 'mb']
    if quick: cl = pickle.load(open(savename,'rb'))  # pickle files must be opened in binary mode
    else:
        cl = {}
        mask = load_planck_mask()
        mask_factor = np.mean(mask**2.)
        for band in bands:
            this_map = load_planck_data(band)
            this_cl = hp.anafast(this_map*mask, lmax=lmax)/mask_factor
            cl[band] = this_cl
        pickle.dump(cl, open(savename,'wb'))


    cl_theory = {}
    pl.clf()
    
    for band in bands:
        l_theory, cl_theory[band] = get_cl_theory(band)
        this_cl = cl[band]
        pl.plot(this_cl/cl_theory[band])
        
    pl.legend(bands)
    pl.plot([0,4000],[1,1],'k--')
    pl.ylim(.7,1.3)
    pl.ylabel('data/theory')
Example no. 8
    def check_models(self):
        '''
        Displays a plot of the models against that taken from a
        respected website (https://www.pvlighthouse.com.au/)
        '''
        plt.figure('Intrinsic bandgap')
        t = np.linspace(1, 500)

        for author in self.available_models():

            Eg = self.update(temp=t, author=author, multiplier=1.0)
            plt.plot(t, Eg, label=author)

        test_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'Si', 'check data', 'iBg.csv')

        data = np.genfromtxt(test_file, delimiter=',', names=True)

        for temp, name in zip(data.dtype.names[0::2], data.dtype.names[1::2]):
            plt.plot(
                data[temp], data[name], '--', label=name)

        plt.xlabel('Temperature (K)')
        plt.ylabel('Intrinsic Bandgap (eV)')

        plt.legend(loc=0)
        self.update(temp=0, author=author, multiplier=1.01)
Example no. 9
    def show(self, x):
        """
        vizualisation of the mm based on the empirical histogram of x

        Parameters
        ----------
        x: array of shape(nbitems): the data to be processed
        """
        step = 3.5*np.std(x)/np.exp(np.log(np.size(x))/3)
        bins = max(10,int((x.max()-x.min())/step))
        h,c = np.histogram(x, bins)
        h = h.astype(float)/np.size(x)  # np.float was removed from numpy; use the builtin
        p = self.mixt
        
        dc = c[1]-c[0]
        y = (1-p)*_gaus_dens(self.mean,self.var,c)*dc
        z = np.zeros(np.size(c))
        i = np.ravel(np.nonzero(c>0))
        z = _gam_dens(self.shape,self.scale,c)*p*dc
        
        import matplotlib.pylab as mp
        mp.figure()
        mp.plot(0.5 *(c[1:] + c[:-1]),h)
        mp.plot(c,y,'r')
        mp.plot(c,z,'g')
        mp.plot(c,z+y,'k')
        mp.title('Fit of the density with a Gamma-Gaussians mixture')
        mp.legend(('data', 'gaussian component', 'gamma component',
                   'mixture distribution'))
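The step and bins lines above are Scott's rule for histogram bin width, h = 3.5*sigma*n**(-1/3); a quick standalone check of the formula:

import numpy as np

x = np.random.randn(10000)
step = 3.5 * np.std(x) / np.size(x) ** (1.0 / 3)  # same as np.exp(np.log(n)/3)
bins = max(10, int((x.max() - x.min()) / step))
print(bins)  # on the order of 50 bins for n = 10000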
Example no. 10
    def visualization2(self, sp_to_vis=None):
        if sp_to_vis:
            species_ready = list(set(sp_to_vis).intersection(self.all_sp_signatures.keys()))
        else:
            raise Exception('list of driver species must be defined')

        if not species_ready:
            raise Exception('None of the input species is a driver')

        for sp in species_ready:
            # Setting up figure
            plt.figure()
            plt.subplot(313)

            mon_val = OrderedDict()
            signature = self.all_sp_signatures[sp]
            for idx, mon in enumerate(list(set(signature))):
                if mon[0] == 'C':
                    mon_val[self.all_comb[sp][mon] + (-1,)] = idx
                else:
                    mon_val[self.all_comb[sp][mon]] = idx

            mon_rep = [0] * len(signature)
            for i, m in enumerate(signature):
                if m[0] == 'C':
                    mon_rep[i] = mon_val[self.all_comb[sp][m] + (-1,)]
                else:
                    mon_rep[i] = mon_val[self.all_comb[sp][m]]
            # mon_rep = [mon_val[self.all_comb[sp][m]] for m in signature]

            y_pos = numpy.arange(len(mon_val.keys()))
            plt.scatter(self.tspan[1:], mon_rep)
            plt.yticks(y_pos, mon_val.keys())
            plt.ylabel('Monomials', fontsize=16)
            plt.xlabel('Time(s)', fontsize=16)
            plt.xlim(0, self.tspan[-1])
            plt.ylim(0, max(y_pos))

            plt.subplot(312)

            for name in self.model.odes[sp].as_coefficients_dict():
                mon = name
                mon = mon.subs(self.param_values)
                var_to_study = [atom for atom in mon.atoms(sympy.Symbol)]
                arg_f1 = [numpy.maximum(self.mach_eps, self.y[str(va)][1:]) for va in var_to_study]
                f1 = sympy.lambdify(var_to_study, mon)
                mon_values = f1(*arg_f1)
                mon_name = str(name).partition('__')[2]
                plt.plot(self.tspan[1:], mon_values, label=mon_name)
            plt.ylabel('Rate(m/sec)', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.1, 0.85), loc='upper right', ncol=1)

            plt.subplot(311)
            plt.plot(self.tspan[1:], self.y['__s%d' % sp][1:], label=parse_name(self.model.species[sp]))
            plt.ylabel('Molecules', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.15, 0.85), loc='upper right', ncol=1)
            plt.suptitle('Tropicalization' + ' ' + str(self.model.species[sp]))

            # plt.show()
            plt.savefig('s%d' % sp + '.png', bbox_inches='tight', dpi=400)
Example no. 11
def cdf(x,colsym="",lab="",lw=4):
    """ plot the cumulative density function

    Parameters
    ----------

    x : np.array()
    colsym : string
    lab : string
    lw : int
        linewidth

    Examples
    --------

    >>> import numpy as np

    """
    rcParams['legend.fontsize']=20
    rcParams['font.size']=20

    x  = np.sort(x)
    n  = len(x)
    x2 = np.repeat(x, 2)
    y2 = np.hstack([0.0, np.repeat(np.arange(1,n) / float(n), 2), 1.0])
    plt.plot(x2,y2,colsym,label=lab,linewidth=lw)
    plt.grid('on')
    plt.legend(loc=2)
    plt.xlabel('Ranging Error[m]')
    plt.ylabel('Cumulative Probability')
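The Examples section of the docstring stops at the numpy import; a hedged usage sketch with synthetic data (assumes the pylab-style imports this snippet relies on, i.e. np, plt and rcParams, are already in scope):

import numpy as np

err = np.abs(np.random.randn(200))   # synthetic ranging errors [m]
cdf(err, colsym='b-', lab='synthetic', lw=2)
plt.show()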
Example no. 12
def plot_runtime_results(results):
    plt.rcParams["figure.figsize"] = 7,7
    plt.rcParams["font.size"] = 22
    matplotlib.rc("xtick", labelsize=24)
    matplotlib.rc("ytick", labelsize=24)

    params = {"text.fontsize" : 32,
              "font.size" : 32,
              "legend.fontsize" : 30,
              "axes.labelsize" : 32,
              "text.usetex" : False
              }
    plt.rcParams.update(params)
    
    #plt.semilogx(results[:,0], results[:,3], 'r-x', lw=3)
    #plt.semilogx(results[:,0], results[:,1], 'g-D', lw=3)
    #plt.semilogx(results[:,0], results[:,2], 'b-s', lw=3)

    plt.plot(results[:,0], results[:,3], 'r-x', lw=3, ms=10)
    plt.plot(results[:,0], results[:,1], 'g-D', lw=3, ms=10)
    plt.plot(results[:,0], results[:,2], 'b-s', lw=3, ms=10)

    plt.legend(["Chain", "Tree", "FFT Tree"], loc="upper left")
    plt.xticks([1e5, 2e5, 3e5])
    plt.yticks([0, 60, 120, 180])

    plt.xlabel("Problem Size")
    plt.ylabel("Runtime (sec)")
    return results
Example no. 13
def _fig_density(sweight, surweight, pval, nlm):
    """
    Plot the histogram of sweight across the image
    and the thresholds implied by the surrogate model (surweight)
    """
    import matplotlib.pylab as mp
    # compute some thresholds
    nlm = nlm.astype('d')
    srweight = np.sum(surweight,1)
    srw = np.sort(srweight)
    nitem = np.size(srweight)
    thf = srw[int((1-min(pval,1))*nitem)]
    mnlm = max(1,nlm.mean())
    imin = min(nitem-1,int((1.-pval/mnlm)*nitem))
    
    thcf = srw[imin]
    h,c = np.histogram(sweight,100)
    I = h.sum()*(c[1]-c[0])
    h = h/I
    h0,c0 = np.histogram(srweight,100)
    I0 = h0.sum()*(c0[1]-c0[0])
    h0 = h0/I0
    mp.figure(1)
    mp.plot(c,h)
    mp.plot(c0,h0)
    mp.legend(('true histogram','surrogate histogram'))
    mp.plot([thf,thf],[0,0.8*h0.max()])
    mp.text(thf,0.8*h0.max(),'p<0.2, uncorrected')
    mp.plot([thcf,thcf],[0,0.5*h0.max()])
    mp.text(thcf,0.5*h0.max(),'p<0.05, corrected')
    mp.savefig('/tmp/histo_density.eps')
    mp.show()
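A hedged synthetic call, just to exercise the plotting; the shapes are assumptions (sweight a 1-D weight map, surweight a 2-D (n, m) surrogate sample, nlm a label map):

import numpy as np

sweight = np.random.gamma(2.0, 1.0, 5000)
surweight = np.random.gamma(2.0, 1.0, (5000, 10))
_fig_density(sweight, surweight, pval=0.2, nlm=np.ones(5000))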
Example no. 14
def plotFeaturePDF(ift, pft, outbase, fmin=0.0, fmax=1.0, fstep=0.01):
    """
    Plot a comparison between the input feature distribution and the 
    feature distribution of the predicted halos
    """
    plt.clf()
    nfbins = int(( fmax - fmin ) / fstep)  # np.logspace needs an integer bin count
    fbins = np.logspace( fmin, fmax, nfbins )
    fcen = ( fbins[:-1] + fbins[1:] ) / 2

    plt.xscale( 'log', nonposx='clip' )
    plt.yscale( 'log', nonposy='clip' )
    
    ic, e, p = plt.hist( ift, fbins, label='Original Halos', alpha=0.5, normed=True )
    pc, e, p = plt.hist( pft, fbins, label='Added Halos', alpha=0.5, normed=True )

    plt.legend()
    plt.xlabel( r'$\delta$' )
    plt.savefig( outbase+'_fpdf.png' )

    fdtype = np.dtype( [ ('fcen', float), ('ifcounts', float), ('pfcounts', float) ] )
    fd = np.ndarray( len(fcen), dtype = fdtype )
    fd[ 'fcen' ] = fcen
    fd[ 'ifcounts' ] = ic
    fd[ 'pfcounts' ] = pc

    fitsio.write( outbase+'_fpdf.fit', fd )
Example no. 15
def plotMassFunction(im, pm, outbase, mmin=9, mmax=13, mstep=0.05):
    """
    Make a comparison plot between the input mass function and the 
    predicted projected correlation function
    """
    plt.clf()

    nmbins = int(( mmax - mmin ) / mstep)  # np.logspace needs an integer bin count
    mbins = np.logspace( mmin, mmax, nmbins )
    mcen = ( mbins[:-1] + mbins[1:] ) /2
    
    plt.xscale( 'log', nonposx = 'clip' )
    plt.yscale( 'log', nonposy = 'clip' )
    
    ic, e, p = plt.hist( im, mbins, label='Original Halos', alpha=0.5, normed = True)
    pc, e, p = plt.hist( pm, mbins, label='Added Halos', alpha=0.5, normed = True)
    
    plt.legend()
    plt.xlabel( r'$M_{vir}$' )
    plt.ylabel( r'$\frac{dN}{dM}$' )
    #plt.tight_layout()
    plt.savefig( outbase+'_mfcn.png' )
    
    mdtype = np.dtype( [ ('mcen', float), ('imcounts', float), ('pmcounts', float) ] )
    mf = np.ndarray( len(mcen), dtype = mdtype )
    mf[ 'mcen' ] = mcen
    mf[ 'imcounts' ] = ic
    mf[ 'pmcounts' ] = pc

    fitsio.write( outbase+'_mfcn.fit', mf )
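A hypothetical call with synthetic halo masses, just to show the expected inputs: actual masses rather than log-masses, so the values fall inside the logspace bins (note that hist(..., normed=True) requires an older matplotlib):

import numpy as np

im = 10 ** np.random.uniform(9, 13, 10000)  # "input" halo masses
pm = 10 ** np.random.uniform(9, 13, 10000)  # "predicted" halo masses
plotMassFunction(im, pm, 'test')            # writes test_mfcn.png and test_mfcn.fit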
Example no. 16
def plot_part2(filename):
	"""
	Plots the result of count ones test
	"""
	fig1 = pl.figure()
	iterations, runtimes, fvals = extract(filename)
	algos = ["SA", "GA", "MIMIC"]
	iters_sa, iters_ga, iters_mimic = [np.array(iterations[a]) for a in algos]
	runtime_sa, runtime_ga, runtime_mimic = [np.array(runtimes[a]) for a in algos]
	fvals_sa, fvals_ga, fvals_mimic = [np.array(fvals[a]) for a in algos]

	plotfunc = getattr(pl, "loglog")
	plotfunc(runtime_sa, fvals_sa, "bs", mew=0)
	plotfunc(runtime_ga, fvals_ga, "gs", mew=0)
	plotfunc(runtime_mimic, fvals_mimic, "rs", mew=0)

	# plotfunc(iters_sa, fvals_sa/(runtime_sa * iters_sa), "bs", mew=0)
	# plotfunc(iters_ga, fvals_ga/(runtime_ga * iters_ga), "gs", mew=0)
	# plotfunc(iters_mimic, fvals_mimic/(runtime_mimic * iters_mimic), "rs", mew=0)

	pl.xlabel("Runtime (seconds)")
	pl.ylabel("Objective function value")
	pl.ylim([min(fvals_sa) / 2, max(fvals_mimic) * 2])
	pl.legend(["SA", "GA", "MIMIC"], loc=4)

	pl.savefig(filename.replace(".csv", ".png"), bbox_inches="tight") 
Example no. 17
    def check_models(self):
        plt.figure('Bandgap narrowing')
        Na = np.logspace(12, 20)
        Nd = 0.
        dn = 1e14
        temp = 300.

        for author in self.available_models():
            BGN = self.update(Na=Na, Nd=Nd, nxc=dn,
                              author=author,
                              temp=temp)

            if not np.all(BGN == 0):
                plt.plot(Na, BGN, label=author)

        test_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'Si', 'check data', 'Bgn.csv')

        data = np.genfromtxt(test_file, delimiter=',', names=True)

        for name in data.dtype.names[1:]:
            plt.plot(
                data['N'], data[name], 'r--',
                label='PV-lighthouse\'s: ' + name)

        plt.semilogx()
        plt.xlabel('Doping (cm$^{-3}$)')
    plt.ylabel('Bandgap narrowing (eV)')

        plt.legend(loc=0)
Example no. 18
	def predict(self,train,test,w,progress=False):
		'''
		1-nearest neighbor classification algorithm using LB_Keogh lower 
		bound as similarity measure. Option to use DTW distance instead
		but is much slower.
		'''
		for ind,i in enumerate(test):
			if progress:
				print(str(ind+1) + ' points classified')
			min_dist=float('inf')
			closest_seq=[]
	
			for j in train:
				if self.LB_Keogh(i,j[:-1],5)<min_dist:
					dist=self.DTWDistance(i,j[:-1],w)
					if dist<min_dist:
						min_dist=dist
						closest_seq=j
			self.preds.append(closest_seq[-1])
			
			if self.plotter: 
				plt.plot(i)
				plt.plot(closest_seq[:-1])
				plt.legend(['Test Series','Nearest Neighbor in Training Set'])
				plt.title('Nearest Neighbor in Training Set - Prediction ='+str(closest_seq[-1]))
				plt.show()
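self.LB_Keogh and self.DTWDistance are not shown in this snippet; for reference, a minimal sketch of the standard LB_Keogh lower bound the docstring refers to (an assumed implementation, not necessarily the author's):

def lb_keogh(s1, s2, r):
    # compare each point of s1 against the upper/lower envelope of s2
    # built over a window of radius r; points inside the envelope cost 0
    total = 0.0
    for i, x in enumerate(s1):
        window = s2[max(0, i - r):min(len(s2), i + r + 1)]
        lower, upper = min(window), max(window)
        if x > upper:
            total += (x - upper) ** 2
        elif x < lower:
            total += (x - lower) ** 2
    return total ** 0.5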
Example no. 19
def test_l1_fit_rand_with_permissive(
    beta_d2=4.0, beta_d1=1.0, beta_seasonal=1.0, beta_step=2.5, period=12, noise=0, seed=3733, doplot=True, sea_amp=0.05
):
    # print "seed=%s,noise=%s,beta_d2=%s,beta_d1=%s,beta_step=%s," \
    #      "beta_seasonal=%s" % (seed,noise,beta_d2,beta_d1,beta_step,beta_seasonal)

    mock = make_l1tf_mock2(noise=noise, seed=seed, sea_amp=sea_amp)
    y = mock["y_with_seasonal"]
    xx = mock["x"]

    step_permissives = [(30, 0.5)]
    sol = l1_fit(
        xx,
        y,
        beta_d2=beta_d2,
        beta_d1=beta_d1,
        beta_seasonal=beta_seasonal,
        beta_step=beta_step,
        period=period,
        step_permissives=step_permissives,
    )

    if doplot:
        plt.clf()
        plt.plot(xx, y, linestyle="-", marker="o", markersize=4)
        plt.plot(xx, sol["xbase"], label="base")
        plt.plot(xx, sol["steps"], label="steps")
        plt.plot(xx, sol["seas"], label="seasonal")
        plt.plot(xx, sol["x"], label="full")
        plt.legend(loc="upper left")
Example no. 20
def fit_plot_unlabeled_data(unlabeled_data_x, labeled_data_x, labeled_data_y, fit_order, data_type, other_data_list, other_data_name):
    output = open('predictions.csv','w', newline='')  # text mode for csv in Python 3
    coeffs = np.polyfit(labeled_data_x, labeled_data_y, fit_order) #does poly fit to nth deg on labeled data
    fit_eq = np.poly1d(coeffs) #Eqn from fit
    predicted_y = fit_eq(unlabeled_data_x)
    i = 0
    writer = csv.writer(output,delimiter=',')
    header = [str(data_type),str(other_data_name),'Predicted_Num_Inc']
    writer.writerow(header)
    while i < len(predicted_y):
        output_data = [unlabeled_data_x[i],other_data_list[i],predicted_y[i]]
        writer.writerow(output_data)
        print('For '+str(data_type)+' of: '+str(unlabeled_data_x[i])+', Predicted Number of Incidents is: '+str(predicted_y[i]))
        i = i + 1
    plt.scatter(unlabeled_data_x, predicted_y, color='blue', label='Predicted Number of Incidents')
    fit_line_x = np.arange(min(unlabeled_data_x), max(unlabeled_data_x), 1)
    plt.plot(fit_line_x, fit_eq(fit_line_x), color='red',linestyle='dashed',label=' Order '+str(fit_order)+' Polynomial Fit')
#____Use below line to plot actual data also!! 
    #plt.scatter(labeled_data_x, labeled_data_y, color='green', label='Actual Incident Report Data')
    plt.title('Predicted Number of 311 Incidents by '+str(data_type))
    plt.xlabel(str(data_type))
    plt.ylabel('Number of 311 Incidents')
    plt.grid()
    plt.xlim([min(unlabeled_data_x)-1500, max(unlabeled_data_x)+1500])
    plt.legend(loc='upper left')
    plt.show()
Example no. 21
    def plot_graph(self):
        '''
        plots a matplotlib graph from the stocks data. Dates on the x axis and
        Closing Prices on the y axis. Then adds it to the graph_win in the
        display frame as a tk widget()
        '''
        x_axis = [
            dt.datetime.strptime(self.daily_data[day][0], '%Y-%m-%d')
            for day in range(1, len(self.daily_data) - 1)
        ]
        y_axis = [
            self.daily_data[cls_adj][-1]
            for cls_adj in range(1, len(self.daily_data) - 1)
        ]
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(
            x_axis,
            y_axis,
            marker='h',
            linestyle='-.',
            color='r',
            label='Daily Adjusted Closing Prices'
        )
        labels = ax.get_xticklabels()
        for label in labels:
            label.set_rotation(15)
        plt.xlabel('Dates')
        plt.ylabel('Close Adj')
        plt.legend()
        plt.tight_layout()  # adjusts the graph to fit in the space its limited to
        self.data_plot = FigureCanvasTkAgg(fig, master=self.display)
        self.data_plot.show()
        self.graph_win = self.data_plot.get_tk_widget()
Example no. 22
def simulationWithoutDrug(numViruses, maxPop, maxBirthProb, clearProb,
                          numTrials):
    """
    Run the simulation and plot the graph for problem 3 (no drugs are used,
    viruses do not have any drug resistance).    
    For each of numTrials trial, instantiates a patient, runs a simulation
    for 300 timesteps, and plots the average virus population size as a
    function of time.

    numViruses: number of SimpleVirus to create for patient (an integer)
    maxPop: maximum virus population for patient (an integer)
    maxBirthProb: Maximum reproduction probability (a float between 0-1)        
    clearProb: Maximum clearance probability (a float between 0-1)
    numTrials: number of simulation runs to execute (an integer)
    """
    totalTime = 300
    noOfVirus = [0.0 for step in range(totalTime)]

    for trial in range(numTrials):
        viruses = [SimpleVirus(maxBirthProb, clearProb) for i in range(numViruses)]
        patient = Patient(viruses, maxPop)

        for step in range(totalTime):
            noOfVirus[step] += patient.update()

    for step in range(totalTime):
        noOfVirus[step] /= numTrials

    pylab.plot(range(totalTime), noOfVirus)
    pylab.title('Virus simulation without Drug')
    pylab.legend(['Virus without Drug'])
    pylab.xlabel('Time step')
    pylab.ylabel('Number of Viruses')
    pylab.show()
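A hypothetical invocation with the classic problem-set parameters (SimpleVirus and Patient must be defined elsewhere; the numbers are illustrative):

simulationWithoutDrug(numViruses=100, maxPop=1000, maxBirthProb=0.1,
                      clearProb=0.05, numTrials=10)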
Example no. 23
    def _plot_nullclines(self, resolution):
        """
        Plot nullclines.

        Arguments
            resolution
                Resolution of plot
        """
        x_mesh, y_mesh, ode_x, ode_y = self._get_ode_values(resolution)

        plt.contour(
            x_mesh, y_mesh, ode_x,
            levels=[0], linewidths=2, colors='black')
        plt.contour(
            x_mesh, y_mesh, ode_y,
            levels=[0], linewidths=2, colors='black',
            linestyles='dashed')

        lblx = mlines.Line2D(
            [], [],
            color='black',
            marker='', markersize=15,
            label=r'$\dot\varphi_0=0$')
        lbly = mlines.Line2D(
            [], [],
            color='black', linestyle='dashed',
            marker='', markersize=15,
            label=r'$\dot\varphi_1=0$')
        plt.legend(handles=[lblx, lbly], loc='best')
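_get_ode_values is not shown here, so for reference here is a self-contained sketch of the same nullcline technique on a toy system (dx/dt = y - x, dy/dt = x(1 - x) - y); all names and the system itself are illustrative:

import numpy as np
import matplotlib.pyplot as plt

x_mesh, y_mesh = np.meshgrid(np.linspace(-2, 2, 200), np.linspace(-2, 2, 200))
ode_x = y_mesh - x_mesh                  # dx/dt on the grid
ode_y = x_mesh * (1 - x_mesh) - y_mesh   # dy/dt on the grid

# the zero contour of each ODE component is a nullcline
plt.contour(x_mesh, y_mesh, ode_x, levels=[0], colors='black')
plt.contour(x_mesh, y_mesh, ode_y, levels=[0], colors='black', linestyles='dashed')
plt.show()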
Example no. 24
def test():
    ## Load files
    s = load_spectrum('ring28yael')
    w = linspace(1510e-9,1600e-9,len(s))

    ## Process
    mins = find_minima(s)
    w_p = 1510e-9 + array(mins) * 90.e-9/len(w)
    ww = 2 * pi * 3e8/w_p

    ## Plot
    pl.plot(w,s)
    pl.plot(w_p,s[mins],'o')
    pl.show()
    
    beta2 = -1./(112e-6*2*pi)*diff(diff(ww))/(diff(ww)[:-1]**3)
    p = polyfit(w_p[1:-1], beta2, 1)
    
    savetxt('ring28yael-p.txt', w_p)
    
    pl.subplot(211)
    pl.plot(w,s)
    pl.plot(w_p,s[mins],'o')
    
    pl.subplot(212)
    pl.plot(w_p[1:-1]*1e6, beta2)
    pl.plot(w_p[1:-1]*1e6, p[1]+ p[0]*w_p[1:-1], label="q=%.2e"%p[0])
    pl.legend()
        
    pl.show()
Example no. 25
	def behavioral_analysis(self):
		"""some analysis of the behavioral data, such as mean percept duration, 
		dominance ratio etc"""
		self.assert_data_intern()
		# only do anything if this is not a no report trial
		if 'RP' in self.file_alias:
			all_percepts_and_durations = [[],[]]
		else:
			all_percepts_and_durations = [[],[],[]]
		if not 'NR' in self.file_alias: #  and not 'RP' in self.file_alias
			for x in range(len(self.trial_indices)):
				if len(self.events) != 0:
					events_this_trial = self.events[(self.events['EL_timestamp'] > self.timestamps_pt[x][0]) & (self.events['EL_timestamp'] < self.timestamps_pt[x][-1])]
					for sc, scancode in enumerate(self.scancode_list):
						percept_start_indices = np.arange(len(events_this_trial))[np.array(events_this_trial['scancode'] == scancode)]
						percept_end_indices = percept_start_indices + 1
						
						# convert to times
						start_times = np.array(events_this_trial['EL_timestamp'])[percept_start_indices] - self.timestamps_pt[x,0]
						if len(start_times) > 0:
							if percept_end_indices[-1] == len(events_this_trial):
								end_times = np.array(events_this_trial['EL_timestamp'])[percept_end_indices[:-1]] - self.timestamps_pt[x,0]
								end_times = np.r_[end_times, len(self.from_zero_timepoints)]
							else:
								end_times = np.array(events_this_trial['EL_timestamp'])[percept_end_indices] - self.timestamps_pt[x,0]

							these_raw_event_times = np.array([start_times + self.timestamps_pt[x,0], end_times + self.timestamps_pt[x,0]]).T
							these_event_times = np.array([start_times, end_times]).T + x * self.trial_duration * self.sample_rate
							durations = np.diff(these_event_times, axis = -1)

							all_percepts_and_durations[sc].append(np.hstack((these_raw_event_times, these_event_times, durations)))

			self.all_percepts_and_durations = [np.vstack(apd) for apd in all_percepts_and_durations]

			# last element is duration, sum inclusive and exclusive of transitions
			total_percept_duration = np.concatenate([apd[:,-1] for apd in self.all_percepts_and_durations]).sum()
			total_percept_duration_excl = np.concatenate([apd[:,-1] for apd in [self.all_percepts_and_durations[0], self.all_percepts_and_durations[-1]]]).sum()

			self.ratio_transition = 1.0 - (total_percept_duration_excl / total_percept_duration)
			self.ratio_percept_red = self.all_percepts_and_durations[0][:,-1].sum() / total_percept_duration_excl

			self.red_durations = np.array([np.mean(self.all_percepts_and_durations[0][:,-1]), np.median(self.all_percepts_and_durations[0][:,-1])])
			self.green_durations = np.array([np.mean(self.all_percepts_and_durations[-1][:,-1]), np.median(self.all_percepts_and_durations[-1][:,-1])])
			self.transition_durations = np.array([np.mean(self.all_percepts_and_durations[1][:,-1]), np.median(self.all_percepts_and_durations[1][:,-1])])

			self.ratio_percept_red_durations = self.red_durations / (self.red_durations + self.green_durations)
			plot_mean_or_median = 0 # mean

			f = pl.figure(figsize = (8,4))
			s = f.add_subplot(111)
			for i in range(len(self.colors)):
				pl.hist(self.all_percepts_and_durations[i][:,-1], bins = 20, color = self.colors[i], histtype='step', lw = 3.0, alpha = 0.4, label = ['Red', 'Trans', 'Green'][i])
			pl.hist(np.concatenate([self.all_percepts_and_durations[0][:,-1], self.all_percepts_and_durations[-1][:,-1]]), bins = 20, color = 'k', histtype='step', lw = 3.0, alpha = 0.4, label = 'Percepts')
			pl.legend()
			s.set_xlabel('time [ms]')
			s.set_ylabel('count')
			sn.despine(offset=10)
			s.annotate("""ratio_transition: %1.2f, \nratio_percept_red: %1.2f, \nduration_red: %2.2f,\nduration_green: %2.2f, \nratio_percept_red_durations: %1.2f"""%(self.ratio_transition, self.ratio_percept_red, self.red_durations[plot_mean_or_median], self.green_durations[plot_mean_or_median], self.ratio_percept_red_durations[plot_mean_or_median]), (0.5,0.65), textcoords = 'figure fraction')
			pl.tight_layout()
			pl.savefig(os.path.join(self.analyzer.fig_dir, self.file_alias + '_dur_hist.pdf'))
Example no. 26
def plotRocCurves(file_legend):
	pylab.clf()
	pylab.figure(1)
	pylab.xlabel('1 - Specificity', fontsize=12)
	pylab.ylabel('Sensitivity', fontsize=12)
	pylab.title("Need for Referral")
	pylab.grid(True, which='both')
	pylab.xticks([i/10.0 for i in range(1,11)])
	pylab.yticks([i/10.0 for i in range(0,11)])
	pylab.tick_params(axis="both", labelsize=15)

	for file, legend in file_legend:
		points = open(file,"r").readlines()  # text mode so split()/float() get str, not bytes
		x = [float(p.split()[0]) for p in points]
		y = [float(p.split()[1]) for p in points]
		dev = [float(p.split()[2]) for p in points]
		x = [0.0] + x
		y = [0.0] + y
		dev = [0.0] + dev
	
		auc = np.trapz(y, x) * 100
		aucDev = np.trapz(dev, x) * 100

		pylab.grid()
		pylab.errorbar(x, y, yerr = dev, fmt='-')
		pylab.plot(x, y, '-', linewidth = 1.5, label = legend + u" (AUC = {0:0.1f}% \xb1 {1:0.1f}%)".format(auc,aucDev))

	pylab.legend(loc = 4, borderaxespad=0.4, prop={'size':12})
	pylab.savefig("referral/referral-curves.pdf", format='pdf')
Example no. 27
    def sanity_PDMAna(self):
        import numpy
        import matplotlib.pylab as mpl
        from PyAstronomy.pyTiming import pyPDM

        # Create artificial data with frequency = 3,
        # period = 1/3
        x = numpy.arange(100) / 100.0
        y = numpy.sin(x*2.0*numpy.pi*3.0 + 1.7)

        # Get a "scanner", which defines the frequency interval to be checked.
        # Alternatively, periods could also be used instead of frequencies.
        S = pyPDM.Scanner(minVal=0.5, maxVal=5.0, dVal=0.01, mode="frequency")

        # Carry out PDM analysis. Get the frequency array
        # (f; note that it is frequency, because the scanner's
        # mode is "frequency") and the associated Theta statistic (t).
        # Use 10 phase bins and 3 covers (= phase-shifted sets of bins).
        P = pyPDM.PyPDM(x, y)
        f1, t1 = P.pdmEquiBinCover(10, 3, S)
        # For comparison, carry out PDM analysis using 10 equidistant
        # bins (no covers).
        f2, t2 = P.pdmEquiBin(10, S)

        # Show the result
        mpl.figure(facecolor='white')
        mpl.title("Result of PDM analysis")
        mpl.xlabel("Frequency")
        mpl.ylabel("Theta")
        mpl.plot(f1, t1, 'bp-')
        mpl.plot(f2, t2, 'gp-')
        mpl.legend(["pdmEquiBinCover", "pdmEquiBin"])
Example no. 28
def find_params():

    FRAMES =  np.arange(30)*100

    frame_images = organizedata.get_frames(ddir("bukowski_04.W2"), FRAMES)
    print "DONE READING DATA"

    CLUST_EPS = np.linspace(0, 0.5, 10)
    MIN_SAMPLES = [2, 3, 4, 5]
    MIN_DISTS = [2, 3, 4, 5, 6]
    THOLD = 240

    fracs_2 = np.zeros((len(CLUST_EPS), len(MIN_SAMPLES), len(MIN_DISTS)))

    for cei, CLUST_EP in enumerate(CLUST_EPS):
        for msi, MIN_SAMPLE in enumerate(MIN_SAMPLES):
            for mdi, MIN_DIST in enumerate(MIN_DISTS):
                print(cei, msi, mdi)
                numclusters = np.zeros(len(FRAMES))
                for fi, im in enumerate(frame_images):
                    centers = frame_clust_points(im, THOLD, MIN_DIST, 
                                                 CLUST_EP, MIN_SAMPLE)
                    # cluster centers
                    numclusters[fi] = len(centers)
                fracs_2[cei, msi, mdi] = float(np.sum(numclusters == 2))/len(numclusters)
    pylab.figure(figsize=(12, 8))
    for mdi, MIN_DIST in enumerate(MIN_DISTS):
        pylab.subplot(len(MIN_DISTS), 1, mdi+1)

        for msi in range(len(MIN_SAMPLES)):
            pylab.plot(CLUST_EPS, fracs_2[:, msi, mdi], label='%d' % MIN_SAMPLES[msi])
        pylab.title("min_dist= %3.2f" % MIN_DIST)
    pylab.legend()
    pylab.savefig('test.png', dpi=300)
Example no. 29
def is_stationary(ts, test_window):
    """
    This function checks whether the given TS is stationary. It could return a
    boolean, but we leave it as-is for visualisation purposes. Not to be run
    once the numbers have been fixed.
    """

    # Determine the rolling statistics (places like these compelled me to use Pandas and not numpy here)
    # pd.rolling_mean/pd.rolling_std were removed from pandas; use the .rolling() API
    rol_mean = ts.rolling(window=test_window).mean()
    rol_std = ts.rolling(window=test_window).std()

    # Plot rolling statistics:
    orig = plt.plot(ts, color="blue", label="Original")
    mean = plt.plot(rol_mean, color="red", label="Rolling Mean")
    std = plt.plot(rol_std, color="black", label="Rolling Std")
    plt.legend(loc="best")
    plt.title("Rolling Mean & Standard Deviation")
    plt.show()

    # Perform the  Dickey-Fuller test: (Check documentation of fn for return params)
    print "Results of Dickey-Fuller Test:"
    dftest = adfuller(timeseries, autolag="AIC")
    dfoutput = pd.Series(dftest[0:4], index=["Test Statistic", "p-value", "#Lags Used", "Number of Observations Used"])
    for key, value in dftest[4].items():
        dfoutput["Critical Value (%s)" % key] = value
    print dfoutput
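A hedged usage sketch, assuming the imports the function relies on (matplotlib.pyplot as plt and statsmodels' adfuller) are in scope; a random walk gives a drifting rolling mean and a Dickey-Fuller test that fails to reject non-stationarity:

import numpy as np
import pandas as pd

ts = pd.Series(np.cumsum(np.random.randn(500)))  # synthetic random walk
is_stationary(ts, test_window=30)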
Example no. 30
def plotFirstTacROC(dataset):
    import matplotlib.pylab as plt
    from os.path import join
    from src.utils import PROJECT_DIR

    plt.figure(figsize=(6, 6))
    time_sampler = TimeSerieSampler(n_time_points=12)
    evaluator = Evaluator()
    time_series_idx = 0
    methods = {
        "cross_correlation": "Cross corr.   ",
        "kendall": "Kendall        ",
        "symbol_mutual": "Symbol MI    ",
        "symbol_similarity": "Symbol sim.",
    }
    for method in methods:
        print(method)
        predictor = SingleSeriesPredictor(good_methods[method], time_sampler)
        prediction = predictor.predictAllInstancesCombined(dataset, time_series_idx)
        roc_auc, fpr, tpr = evaluator.evaluate(prediction)
        plt.plot(fpr, tpr, label=methods[method] + " (auc = %0.3f)" % roc_auc)
    plt.legend(loc="lower right")
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.grid()
    plt.savefig(join(PROJECT_DIR, "output", "firstTACROC.pdf"))
Example no. 31
# A. integrate sin from 0 -> pi/2
result, error = integrate.quad(sin, 0, pi / 2)
print("integral(sin 0->pi/2):", result)

# B. Integrate sin from 0 to x where x is a range of
#    values from 0, 2*pi

x = linspace(0, 2 * pi, 101)

# 1. quad needs to be vectorized before you can call it with an array.
vquad = vectorize(integrate.quad)

# 2. Now calculate the integral using the vectorized function.
approx, error_est = vquad(sin, 0, x)

# 3. Evaluate the actual integral value for x.
exact = -cos(x) + cos(0)

# 4. Plot the comparison.
subplot(121)
plot(x, approx, label="Approx")
plot(x, exact, label="Exact")
xlabel('x')
ylabel('integral(sin)')
title('Integral of sin from 0 to x')
legend()

subplot(122)
plot(x, exact - approx)
title('Error in approximation')
show()
Example no. 32
    def fit(self, Xtrain, Ytrain, Xtest, Ytest, epoch=50, learning_rate=0.001, batchsz=100):
        N, D = Xtrain.shape
        M1 = 1000
        M2 = 500

        tf_X = tf.placeholder(dtype=tf.float32)
        tf_Y = tf.placeholder(dtype=tf.float32)

        tf_W1 = tf.Variable(dtype=tf.float32,
                            initial_value=tf.random.normal(shape=(D, M1), mean=0, stddev=tf.math.sqrt(1 / D)))
        tf_b1 = tf.Variable(dtype=tf.float32, initial_value=np.zeros(shape=(M1)))

        tf_W2 = tf.Variable(dtype=tf.float32,
                            initial_value=tf.random.normal(shape=(M1, M2), mean=0, stddev=tf.math.sqrt(1 / M1)))
        tf_b2 = tf.Variable(dtype=tf.float32, initial_value=np.zeros(shape=(M2)))

        tf_W3 = tf.Variable(dtype=tf.float32,
                            initial_value=tf.random.normal(shape=(M2, self.NUM_CLASSES), mean=0,
                                                           stddev=tf.math.sqrt(1 / M2)))
        tf_b3 = tf.Variable(dtype=tf.float32, initial_value=np.zeros(shape=(self.NUM_CLASSES)))

        tf_Z1 = tf.nn.relu(tf.matmul(tf_X, tf_W1) + tf_b1)
        tf_Z2 = tf.nn.relu(tf.matmul(tf_Z1, tf_W2) + tf_b2)
        tf_Yhat = tf.nn.softmax(tf.matmul(tf_Z2, tf_W3) + tf_b3)

        tf_cost = tf.reduce_sum(-1 * tf_Y * tf.math.log(tf_Yhat))
        tf_train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(
            tf_cost)

        training_accuracies = []
        test_accuracies = []
        epoches = []

        with tf.Session() as session:
            session.run(tf.global_variables_initializer())

            nBatches = np.math.ceil(N / batchsz)

            for i in range(epoch):
                epoches.append(i)
                # Xtrain, Ytrain = sklearn.utils.shuffle(Xtrain, Ytrain)

                for j in range(nBatches):
                    lower = j * batchsz
                    upper = np.min([(j + 1) * batchsz, N])

                    session.run(tf_train,
                                feed_dict={tf_X: Xtrain[lower:upper], tf_Y: Ytrain[lower: upper]})

                test_error, Yhat = session.run([tf_cost, tf_Yhat], feed_dict={tf_X: Xtest, tf_Y: Ytest})
                test_accuracy = self.score(Ytest, Yhat)
                test_accuracies.append(test_accuracy)

                Yhat = session.run(tf_Yhat, feed_dict={tf_X: Xtrain, tf_Y: Ytrain})
                training_accuracy = self.score(Ytrain, Yhat)
                training_accuracies.append(training_accuracy)

                print('Epoch ' + str(i) + ' / test error = ' + str(test_error / Xtest.shape[0])
                      + ' / training_accuracy = ' + str(training_accuracy)
                      + ' / test_accuracy = ' + str(test_accuracy))

        # plot
        print(training_accuracies)
        print(test_accuracies)

        plt.plot(epoches, training_accuracies, label='training accuracy')
        plt.plot(epoches, test_accuracies, label='test accuracy')
        plt.xlabel('epoch')
        plt.ylabel('accuracy')
        plt.grid(True)
        plt.title('Accuracy')
        plt.legend()
        plt.show()
Example no. 33
#Creating the environment
env = nchain()
#Start the learning of the model
model = q_learning_keras(env,5000)
#Using the model to show how it works:
CASH = []
STOCK = []
NETWORTH = []
REWARD = []
ACTION = []
done = False
s = env.reset()
while not done:
    ACTION.append(np.argmax(model.predict(np.array([s]))))
    s, r, done = env.DO(np.argmax(model.predict(np.array([s]))))
    NETWORTH.append(env.result())
    CASH.append(s[2])
    STOCK.append(s[3])
    REWARD.append(r)
print(env.result())
plt.plot(CASH)
plt.plot(REWARD)
plt.plot(NETWORTH)
plt.plot(ACTION)
plt.legend(['cash','reward','net worth','action'])
plt.show()
plt.plot(STOCK)
plt.show()

Example no. 34
def go(sSrch='FluxData_2*.txt', Debug=False, showMag=True, binMinutes=5.3, \
           showSubset=True, showLegend=True):

    # let's look for our photom files
    lPhot = glob.glob(sSrch)

    iMin = 0
    iMax = len(lPhot)
    if Debug:
        iMax = 1

    # initialize master arrays
    tAll = np.array([])
    rAll = np.array([])
    eAll = np.array([])
    nAll = np.array([])

    # let's stack the point-by-point photometry too
    tFine = np.array([])
    rFine = np.array([])
    eFine = np.array([])

    for iFile in range(iMin, iMax):
        # thisTime, thisRate, thisError, thisNbin = readAndBin(lPhot[iFile])

        tRaw, rRaw, eRaw = np.genfromtxt(lPhot[iFile], unpack=True)
        thisTime, thisRate, thisError, thisNbin = \
            BinData(tRaw, rRaw, vError=eRaw, Verbose=False, BinTime=binMinutes/1440.)

        # now we np.hstack each 1D array onto its corresponding 1D master array
        tAll = np.hstack((tAll, thisTime))
        rAll = np.hstack((rAll, thisRate))
        eAll = np.hstack((eAll, thisError))
        nAll = np.hstack((nAll, thisNbin))

        # let's stack our raw data too
        tFine = np.hstack((tFine, tRaw))
        rFine = np.hstack((rFine, rRaw))
        eFine = np.hstack((eFine, eRaw))

        print(lPhot[iFile], np.shape(tAll), np.shape(rAll),
              np.shape(eAll), np.shape(nAll))

    # all being well, after exiting the loop we should have the
    # master tAll, rAll etc. for the entire set of files. THESE we can
    # plot.

    #  tBin, rBin, eBin = readAndBin(lcFile)

    # we'll create separate variables to plot so that we can control what they do
    rSho = np.copy(rAll)
    eSho = np.copy(eAll)

    rShoFine = np.copy(rFine)
    eShoFine = np.copy(eFine)

    sYaxis = 'Flux relative to reference star'

    if showMag:
        rSho = -2.5 * np.log10(rAll)
        eSho = 1.086 * eAll

        rShoFine = -2.5 * np.log10(rShoFine)
        eShoFine = 1.086 * eShoFine

        sYaxis = r'$\Delta mag$ relative to reference star'

    # label for plots
    sLabelFine = '15s exposures'
    sLabelSho = '%.2f-min bins (%i exposures/bin)' \
        % (binMinutes, binMinutes / 0.25)

    # syntax to plot would come here...
    fig = plt.figure(1)
    fig.clf()
    plt.scatter(tAll, rSho, color='b', zorder=10, s=4, alpha=0.5, \
                    edgecolor='0.1')
    #dum = plt.scatter(tAll, rSho, c=eSho, zorder=15, s=9, cmap='Blues_r', \
    #                      vmin=0.0, vmax=0.01)
    plt.errorbar(tAll,
                 rSho,
                 yerr=eSho,
                 color='b',
                 alpha=0.5,
                 ls='none',
                 zorder=10,
                 label=sLabelSho,
                 marker='o',
                 ms=2)
    plt.gca().set_ylabel(sYaxis)
    plt.gca().set_xlabel('MJD')

    plt.ylim(0.6, 0.0)

    #cbar = plt.colorbar(dum)

    # we get the axis limits here so that we can apply them below
    yAx = plt.gca().get_ylim()

    # let's underplot the raw data
    faintColor = '0.7'
    plt.scatter(tFine, rShoFine, color=faintColor, zorder=5, s=3, alpha=0.5)
    plt.errorbar(tFine,
                 rShoFine,
                 yerr=eShoFine,
                 color=faintColor,
                 zorder=5,
                 ls='none',
                 ms=2,
                 alpha=0.5,
                 label=sLabelFine,
                 marker='o')
    plt.gca().set_ylim(yAx)

    # plt.show()

    leg = plt.legend()

    if showSubset:
        xLo = 58693.7
        xHi = 58694.0
        yLo = 0.30
        yHi = 0.00

        xPol = np.array([xLo, xHi, xHi, xLo, xLo])
        yPol = np.array([yLo, yLo, yHi, yHi, yLo])

        plt.plot(xPol, yPol, color='0.2', ls='--', zorder=15)

    # save the figure
    plt.savefig('v404Cyg_2019Jul_nights1-5.png')

    # ok now for the subset
    if showSubset:
        plt.xlim(xLo, xHi)
        plt.ylim(yLo, yHi)

        # show the lines as well
        xBrack = np.copy(plt.gca().get_xlim())
        bRange = (tAll > xBrack[0]) & (tAll <= xBrack[1])
        lSor = np.argsort(tAll[bRange])
        dum2 = plt.plot(tAll[bRange][lSor], rSho[bRange][lSor], \
                            zorder=9, color='b', lw=1, \
                            alpha=0.4)

        plt.savefig('v404Cyg_2019Jul_nights1-5_zoomNight5.png')
Example no. 35
                                                        one_hot_label=True)
train_size = x_train.shape[0]
batch_size = 100
step_num = 100
learning_rate = 0.1
steps = []
losses = []
acc = []

for i in range(step_num):
    steps.append(i)
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # record the loss and accuracy at each update so they can be plotted
    losses.append(network.loss(x_batch, t_batch) / batch_size)
    acc.append(network.accuracy(x_batch, t_batch))

    # compute the gradient vector
    grads = network.numerical_gradients(x_batch, t_batch)

    # update the parameters: step along the gradient, not the parameter itself
    for param in ('w1', 'w2', 'b1', 'b2'):
        network.params[param] -= learning_rate * grads[param]

plt.plot(steps, losses, label='loss')
plt.plot(steps, acc, label='accuracy')
plt.legend()
plt.show()
Example no. 36
 def plot_evoked(self,ep,fname=None,save_plot=True,show_plot=False,condition=None,plot_dir=None,
                 info={'meg':{'scale':1e15,'unit':'fT'},'eeg':{'scale':1e3,'unit':'mV'}}):
     '''
     plot subplots evoked/average 
     MEG
     ECG/EOG + performance 
     STIM Trigger/Response
     events, rt mean median min max        
     '''
     name = 'test'
     subject_id = name
     if fname:
        fout_path = os.path.dirname(fname)
        name      = os.path.splitext( os.path.basename(fname) )[0]
        subject_id = name.split('_')[0]
     else:
        name      = "test.png"
        fout_path = "."
    
     if plot_dir:
        fout_path += "/" + plot_dir
        mkpath( fout_path )        
     fout =fout_path +'/'+ name   
        
     #pl.ioff()  # switch  off (interactive) plot visualisation
     pl.figure(name)
     pl.clf()
     #fig = pl.figure(name,figsize=(10, 8), dpi=100))
     
     pl.title(name)
    #--- meg
     pl.subplot(311)
     picks = self.picks.meg_nobads(ep)
     avg   = ep.average(picks=picks) 
     avg.data *= info['meg']['scale']
     t0,t1=avg.times.min(), avg.times.max() 
     
     pl.ylim(self.minmax(avg.data))
     pl.xlim(t0,t1)
      
     #pl.xlabel('[s]')
     pl.ylabel('['+ info['meg']['unit']+ ']')
     
     t= subject_id +' Evoked '
     if condition:
        t +=' '+condition
     if ep.info['bads']:         
        s = ','. join( ep.info['bads'] )
        pl.title(t +' bads: ' + s)
     else:
        pl.title(t)
    
     pl.grid(True)
     pl.plot(avg.times, avg.data.T,color='black')
      
    #--- ecg eog        
     pl.subplot(312)
     picks  = self.picks.ecg_eog(ep)
     labels =[ ep.info['ch_names'][x] for x in picks]
     avg    = ep.average(picks=picks) 
     avg.data *= info['eeg']['scale']
     pl.ylim(self.minmax(avg.data))
     pl.xlim(t0,t1)
     pl.ylabel('['+ info['eeg']['unit']+ ']')
     
     pl.grid(True)
     d = pl.plot(avg.times, avg.data.T)
     pl.legend(d, labels, loc=2,prop={'size':8})
 
    #--- stim        
     pl.subplot(313)
     picks = self.picks.stim_response(ep) 
     labels =[ ep.info['ch_names'][x] for x in picks]    
     labels[0] += '  Evts: %d Id: %d' %(ep.events.shape[0],ep.events[0,2]) 
     avg   = ep.average(picks=picks) 
     pl.ylim(self.minmax(avg.data))
     pl.xlim(t0,t1)
     pl.xlabel('[s]')
   
     pl.grid(True)
     d = pl.plot(avg.times, avg.data.T)
     pl.legend(d, labels, loc=2,prop={'size':8},)            
           
    #---
     if save_plot:
        fout += self.file_extention              
        pl.savefig(fout, dpi=self.dpi)
        if self.verbose:
           print"---> done saving plot: " +fout 
     else:
        fout= "no plot saved"
    #---
     if show_plot:
        pl.show()
     else:   
        pl.close()            
      
     return fout
Example no. 37
            if bothDirections:
                plt.plot(Utils.movingAverage(average),
                         label=str(2 * numOptionsToUse) + ' opt.',
                         color=Utils.colors[color_idx])
            else:
                plt.plot(Utils.movingAverage(average),
                         label=str(numOptionsToUse) + ' opt.',
                         color=Utils.colors[color_idx])

            plt.fill_between(range(len(Utils.movingAverage(average))),
                             Utils.movingAverage(minConfInt),
                             Utils.movingAverage(maxConfInt),
                             alpha=0.5,
                             color=Utils.colors[color_idx])

        plt.legend(loc='upper left', prop={'size': 10}, bbox_to_anchor=(1, 1))
        plt.tight_layout(pad=7)
        plt.show()

    elif taskToPerform == 7:  # Solve for a given goal w/ primitive actions (q-learning)
        # following discovered AND loaded options This one is for comparison.
        numOptionsLoadedToConsider = 4
        numOptionsDiscoveredToConsider = 128

        returnsEvalPrimitive1, returnsEvalDiscovered, totalOptionsToUseDiscovered = qLearningWithOptions(
            env=env,
            alpha=0.1,
            gamma=0.9,
            options_eps=0.0,
            epsilon=1.0,
            nSeeds=num_seeds,
Example no. 38
    k2 = f(t + 0.5 * dt, y + 0.5 * k1 * dt)
    k3 = f(t + 0.5 * dt, y + 0.5 * k2 * dt)
    k4 = f(t + 1.0 * dt, y + 1.0 * k3 * dt)
    return y + (k1 + 2 * k2 + 2 * k3 + k4) * dt / 6
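The snippet opens mid-function: the def line and the k1 stage are cut off. A reconstruction of the full classic RK4 step, with the signature inferred from the RK4(PHI[i], F(a), X[i], dx) call below (an assumption, not the original source):

def RK4(y, f, t, dt):
    # classic fourth-order Runge-Kutta step for y' = f(t, y)
    k1 = f(t, y)
    k2 = f(t + 0.5 * dt, y + 0.5 * k1 * dt)
    k3 = f(t + 0.5 * dt, y + 0.5 * k2 * dt)
    k4 = f(t + 1.0 * dt, y + 1.0 * k3 * dt)
    return y + (k1 + 2 * k2 + 2 * k3 + k4) * dt / 6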


F = lambda a: (lambda x, phi: np.sqrt(2 * (np.exp(phi) - 1 + a *
                                           (np.sqrt(1 - 2 * phi / a) - 1))))

a_s = np.array(sys.argv[1:], dtype=np.float64)
print(a_s)
Vo_s = [0.1, 1, 10, 50]
N = 1001
X = np.linspace(0, 10, N)
dx = X[1] - X[0]

for a in a_s:
    plt.figure()
    for Vo in Vo_s:
        PHI = np.zeros_like(X)
        PHI[0] = -Vo
        for i in range(N - 1):
            PHI[i + 1] = RK4(PHI[i], F(a), X[i], dx)
        plt.plot(X, PHI / Vo, "-", lw=3)
    k = np.sqrt(1.0 - 1.0 / a)
    plt.plot(X, -np.exp(-k * X), "--")
    plt.xlabel("$x/L$")
    plt.ylabel(r"$\phi/V_o$")
    plt.legend([r"$eV_o=%.1fT_e$" % (Vo) for Vo in Vo_s], loc="lower right")
plt.show()
Example no. 39
def draw(graph,
         title=None,
         layout=None,
         filename=None,
         return_ax=False,
         pos=None,
         font_size=9,
         alpha=1.0,
         label_shift=(0, 0),
         truncate_labels=10):
    """Graph drawing made a bit easier
    
    Parameters:
        :graph (Graph): input graph, has to be generated via kegg_link_graph()
        :layout (str): layout type, choose from 'bipartite_layout',
            'circular_layout', 'kamada_kawai_layout', 'random_layout',
            'shell_layout', 'spring_layout', 'spectral_layout'
        :filename (str): if a filename is selected saves the plot as filename.png
        :title (str): title for the graph
        :return_ax: if True returns ax for plot
        
    Returns:
        :ax (list): optional ax for the plot


        """
    default_layout = "spring_layout"
    if layout is None:
        layout = default_layout

    node_groups = {}

    graph_nodetypes = get_unique_nodetypes(graph)

    base_colors = list(mplcolors.BASE_COLORS.keys())

    for i, nodetype in enumerate(graph_nodetypes):
        node_group = (get_nodes_by_nodetype(graph, nodetype,
                                            return_dict=True).keys())
        node_groups.update({nodetype: (node_group, base_colors[i])})

    if title is None:
        if len(graph_nodetypes) == 1:
            title = "{} graph".format(graph_nodetypes[0])
        elif len(graph_nodetypes) == 2:
            title = "{} > {} graph".format(graph_nodetypes[1],
                                           graph_nodetypes[0])
        else:
            title = "Graph plot"

    layouts = {
        "circular_layout": nx.circular_layout,
        "kamada_kawai_layout": nx.kamada_kawai_layout,
        "random_layout": nx.random_layout,
        "shell_layout": nx.shell_layout,
        "spring_layout": nx.spring_layout,
        "spectral_layout": nx.spectral_layout,
    }

    if layout not in layouts:
        logging.warning(
            "layout {} not valid: falling back to the default {} layout".format(
                layout, default_layout))
        layout = default_layout

    plt.figure()

    if pos is None:
        output_layout = layouts[layout](graph)
        pos = {}
        for key, value in output_layout.items():
            pos[key] = tuple(value)

    for nodetype, node_group in node_groups.items():
        nx.draw_networkx(graph,
                         nodelist=node_group[0],
                         pos=pos,
                         node_color=node_group[1],
                         with_labels=False,
                         label=nodetype)

    nx.draw_networkx_edges(graph, pos)
    pos_labels = shift_pos(pos, label_shift)

    candidate_labels = nx.get_node_attributes(graph, "label")

    if candidate_labels != {}:
        if truncate_labels != False:
            labels = shorten_labels(candidate_labels, truncate_labels)
        else:
            labels = candidate_labels
    else:
        #        labels = None
        nodelist = list(graph.nodes)
        labels = dict(zip(nodelist, nodelist))

    nx.draw_networkx_labels(graph,
                            pos_labels,
                            labels=labels,
                            font_size=font_size,
                            alpha=alpha)

    plt.legend()
    if title is not None:
        plt.title(title)

    plt.axis("off")

    if filename is not None:
        plt.savefig("{}.png".format(filename))

    plt.show()

    if return_ax:
        ax = plt.gca()

        return ax
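A hypothetical call, assuming graph was built with kegg_link_graph() as the docstring requires:

ax = draw(graph, title="demo graph", layout="kamada_kawai_layout",
          filename="demo", return_ax=True)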
Example no. 40
        dos_r.append(float(line.split()[1]))

    frequency_p = []
    power_spectrum = []
    for line in power_file.readlines():
        frequency_p.append(float(line.split()[0]))
        power_spectrum.append(float(line.split()[1]))

    # power_spectrum = get_dos(temp,frequency_p,power_spectrum, 12*12*6)

    power_spectrum = get_dos(temp, frequency_p, power_spectrum, 12 * 12 * 12)

    pl.plot(frequency_p, power_spectrum, label='power')
    pl.plot(frequency, dos, label='dos')
    pl.plot(frequency_r, dos_r, label='dos_r')
    pl.legend()
    pl.show()

    # free_energy = get_free_energy(temp,frequency,dos) + get_free_energy_correction(temp, frequency, dos, shift)

    print((get_free_energy_correction_shift(temp, frequency, dos, shift),
           get_free_energy_correction_dos(temp, frequency, dos, dos_r)))

    free_energy = get_free_energy(temp, frequency_r,
                                  dos_r) + get_free_energy_correction_dos(
                                      temp, frequency, dos_r, dos)
    entropy = get_entropy(temp, frequency_r, dos_r)
    c_v = get_cv(temp, frequency_r, dos_r)
    print('Renormalized')
    print('-------------------------')
    print(('Free energy: {0} KJ/K/mol'.format(free_energy)))
Example no. 41
    x=train_imgs,  # Input should be (train_cases, 128, 128, 1)
    y=train_lbls,
    batch_size=batch_size,
    epochs=epochs,
    verbose=2,
    validation_data=(test_imgs, test_lbls),
    callbacks=[tensorboard, es],
)
my_cnn.save('model.h5')
print(np.max(history.history['val_acc']))
plt.figure()
plt.plot(history.history['acc'], label='train accuracy')
plt.plot(history.history['val_acc'], label='test accuracy')
plt.ylabel('Accuracy')
plt.xlabel('epoch')
plt.legend(loc='lower right')

plt.figure()
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='test loss')
plt.ylabel('Loss')
plt.xlabel('epoch')
plt.legend(loc='lower right')
plt.show()

#Confusion Matrix and Classification Report
Y_pred = my_cnn.predict_classes(test_imgs, batch_size=int(len(test_imgs) / 10))
y_pred = np.argmax(Y_pred, axis=1)
#print(Y_pred)
#print(y_pred)
print(test_lbls)
Example no. 42
	import numpy as np
	XX = np.array([1.0])	
	for i, x in enumerate(L):
		if type(x) != type(XX[0]):
			print(i, x, type(x))
			
def delete_none(L):
	return [x for x in L if x is not None]

import matplotlib.pylab as plt
f = open('data3.pickle', 'rb')
n = pickle.load(f)
k = pickle.load(f)
m = pickle.load(f)

for s, L in zip(['n', 'k', 'm'], [n, k, m]): 
	print(s)
	verify_list(L)
p = delete_none(n)
l = delete_none(k)
s = delete_none(m)

plt.hist([s, p, l], bins=30, histtype='step', range=(0, 0.002),
         label=['R=1', 'R=0.1', 'R=0.05'], fill=False,
         cumulative=False, stacked=False, color=['blue', 'green', 'red'])
plt.xlabel('Pseudomass (GeV^2)')
plt.ylabel('Frequency')
#plt.text(.0020,100,'this histogram represents the pseudomass of the jets with more than one constituent', color ='red', fontsize=8, bbox = {'facecolor': 'white', 'pad':9}, verticalalignment='top', horizontalalignment='center')
plt.text(0.004,900,'p = -1', color = 'blue', bbox= {'facecolor': 'white', 'pad':10})
plt.title('Pseudomass Observable')
plt.legend(loc='upper right')
plt.show()
Esempio n. 43
0
    def drawsail(self, numberofpermutations=1000, optimalpctasdecimal=0.90):
        df = self.permutationstodataframe(numberofpermutations)
        #df['returnoverrisk'] = df.portfolioreturn / df.portfoliostandarddeviation
        maxreturnoverriskseries = df.loc[df['returnoverrisk'].idxmax()]
        df['maxreturnoverrisk'] = maxreturnoverriskseries['returnoverrisk']
        print('the max returnoverrisk is:',
              maxreturnoverriskseries['returnoverrisk'])
        #df.apply(lambda row: min([row['A'], row['B']])-row['C'], axis=1)
        #df.plot(title='Title Here')
        import matplotlib.pylab as plt
        #import numpy as np
        #import pandas as pd
        #import numpy as np
        #df = pd.DataFrame(np.random.randn(10,2), columns=['col1','col2'])
        #df['col3'] = np.arange(len(df))**2 * 100 + 100
        #print df
        #plt.scatter(df.col1, df.col2, s=df.col3)
        #colors = np.where(df.col3 > 300, 'r', 'k')

        #fig = plt.figure()
        fig = plt.figure(figsize=(12.0, 9.0))  # in inches!

        cond = df.returnoverrisk > df.maxreturnoverrisk * optimalpctasdecimal
        subset_a = df[cond].dropna()
        subset_b = df[~cond].dropna()
        plt.scatter(subset_a.portfoliostandarddeviation,
                    subset_a.portfolioreturn,
                    s=7,
                    c='red',
                    label='frontier >' + str(int(optimalpctasdecimal * 100)) +
                    '%',
                    marker='s',
                    edgecolors='none')
        plt.scatter(subset_b.portfoliostandarddeviation,
                    subset_b.portfolioreturn,
                    s=7,
                    c='dodgerblue',
                    label='suboptimal',
                    marker='s',
                    edgecolors='none')

        from matplotlib.ticker import FuncFormatter
        ax = plt.subplot(111)
        ax.xaxis.set_major_formatter(FuncFormatter(self.myfunc))
        ax.yaxis.set_major_formatter(FuncFormatter(self.myfunc))

        plt.legend(fontsize=12)
        fig.suptitle(
            'Optimal Weights  (' +
            self.EfficientFrontierObject.StartDateString + ' to ' +
            self.EfficientFrontierObject.EndDateString + ')' + chr(10) +
            maxreturnoverriskseries['weightstring'] + chr(10) + 'N=' +
            str(numberofpermutations) + '   '
            'Annualized Return=' +
            str(round(maxreturnoverriskseries['portfolioreturn'] * 100, 2)) +
            '%   ' + 'StDev=' + str(
                round(
                    maxreturnoverriskseries['portfoliostandarddeviation'] *
                    100, 2)) + '%',
            fontsize=12)
        plt.xlabel('Risk (StDev)', fontsize=12)
        plt.ylabel('Return (%)', fontsize=12)
        import datetime
        today_datetime = datetime.datetime.today()
        today_datetime_string_forfilename = today_datetime.strftime(
            '%Y%m%d%H%M%S')
        import config
        cachefilename = config.mycachefolder + '\\drawsail ' + today_datetime_string_forfilename + '.jpg'
        fig.savefig(cachefilename)
        return cachefilename
Esempio n. 44
0
def plotDetections(func,
                   regions,
                   gt=[],
                   ticks=None,
                   export=None,
                   silent=True,
                   detailedvis=False):
    """ Plots a time series and highlights both detected regions and ground-truth regions.
    
    `regions` specifies the detected regions as (a, b, score) tuples.
    
    `gt` specifies the ground-truth regions either as (a, b) tuples or as pointwise boolean labels.
    
    Custom labels for the ticks on the x axis may be specified via the `ticks` parameter, which expects
    a dictionary with tick locations as keys and tick labels as values.
    
    If `export` is set to a string, the plot will be saved to that filename instead of being shown.
    
    Setting `silent` to False will print the detected intervals to the console.
    """

    # Convert pointwise ground-truth to list of regions
    if (len(gt) > 0) and not isinstance(gt[0], (tuple, list)):
        gt = pointwiseLabelsToIntervals(gt)

    # Plot time series and ground-truth intervals
    plotted_function = False
    for a, b in gt:
        show_interval(func,
                      a,
                      b,
                      10000,
                      'r',
                      1.0,
                      plot_function=not plotted_function,
                      border=True)
        plotted_function = True

    # Plot detected intervals with color intensities corresponding to their score
    if len(regions) > 0:
        minScore = min(r[2] for r in regions)
        maxScore = max(r[2] for r in regions)
        for i in range(len(regions)):
            a, b, score = regions[i]
            if not silent:
                print("Region {}/{}: {} - {} (Score: {})".format(
                    i, len(regions), a, b, score))
            intensity = float(score - minScore) / (
                maxScore - minScore) if minScore < maxScore else 1.0
            show_interval(func,
                          a,
                          b,
                          10000,
                          plot_function=not plotted_function,
                          color=(0.8 - intensity * 0.8, 0.8 - intensity * 0.8,
                                 1.0))
            plotted_function = True

            # Show supplementary visualization
            if detailedvis:
                mainFigNum = plt.gcf().number
                detailfig = plt.figure()
                if func.shape[0] == 1:
                    h_nonextreme, bin_edges = np.histogram(np.hstack(
                        [func[0, :a], func[0, b:]]),
                                                           bins=40)
                    h_extreme, _ = np.histogram(func[0, a:b], bins=bin_edges)
                    bin_means = 0.5 * (bin_edges[:-1] + bin_edges[1:])
                    plt.plot(bin_means, h_extreme, figure=detailfig)
                    plt.plot(bin_means, h_nonextreme, figure=detailfig)
                else:
                    X_nonextreme = np.hstack([func[:2, :a], func[:2, b:]])
                    X_extreme = func[:2, a:b]
                    # scatter the first dimension against the second
                    plt.plot(X_nonextreme[0],
                             X_nonextreme[1],
                             'bo',
                             figure=detailfig)
                    plt.plot(X_extreme[0],
                             X_extreme[1],
                             'r+',
                             figure=detailfig)
                plt.figure(mainFigNum)

    # Draw legend
    patch_detected_extreme = mpatches.Patch(color='blue',
                                            alpha=0.3,
                                            label='detect. extreme')
    patch_gt_extreme = mlines.Line2D([], [], color='red', label='gt extreme')
    patch_time_series = mlines.Line2D([], [],
                                      color='blue',
                                      label='time series')

    plt.legend(
        handles=[patch_time_series, patch_gt_extreme, patch_detected_extreme],
        loc='center',
        mode='expand',
        ncol=3,
        bbox_to_anchor=(0, 1, 1, 0),
        shadow=True,
        fancybox=True)

    # Set tick labels
    if ticks is not None:
        ax = plt.gca()
        ax.set_xticks(list(ticks.keys()))
        ax.set_xticklabels(list(ticks.values()))

    # Display plot
    if export:
        plt.savefig(export)
    else:
        plt.show()
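# Usage sketch (assumption): `func` is a (d, n) time-series array and each
# detection is an (a, b, score) tuple; all demo values below are hypothetical.
import numpy as np

demo_func = np.sin(np.linspace(0, 20, 1000)).reshape(1, -1)
demo_regions = [(400, 450, 0.9), (700, 720, 0.4)]  # (start, end, score)
plotDetections(demo_func, demo_regions, gt=[(395, 455)], silent=False)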
Esempio n. 45
0
def fourier_analysis(sig,
                     timestep,
                     N_MAX_POW=1,
                     generate_plot=False,
                     display_plot=False,
                     **kwargs):
    ''' Calculate the sinusoidal signal spectrum using the Fast Fourier Transform,
    pick a certain number of frequencies with maximum power (N = N_MAX_POW)
    and return an equation of the form:
        y = H0 + A1*cos(omega1*t+phi1) + A2*cos(omega2*t+phi2) + ... + A_i*cos(omega_i*t+phi_i)
    where i = N_MAX_POW.

    It can also plot a nice graphic.

    Args:
    -----
        sig (1D - array of floats) [m AMSL]:
            Measured with equal intervals values of the signal of interest. In our case
            this array contains measured values of the water level in [m AMSL].
        timestep (int) [s]:
            Number of seconds between two measurements in array `sig` (i.e. measurement interval)
        N_MAX_POW (int):
            Number of frequencies with maximum power to pick for computing the curve equation.
            Example:
            N_MAX_POW = 1 >>> will compute following equation:
            y = H0 + A1*cos(omega1*t+phi1), where
                A1, omega1, phi1 - params of sinusoid which frequency has maximal power

            N_MAX_POW = 3 >>> will compute following equation:
            y = H0 + A1*cos(omega1*t+phi1) + A2*cos(omega2*t+phi2) +  A3*cos(omega3*t+phi3), where
                A1, omega1, phi1 - params of sinusoid which frequency has maximal power
                A2, omega2, phi2 - params of sinusoid which frequency has second maximal power
                A3, omega3, phi3 - params of sinusoid which frequency has third maximal power
        generate_plot (bool):
            flag to visualize result

        **kwargs:
            are passed to plot functions (see code below)

    Return:
    -------
        EQUATION (dict):
            dictionary with estimated parameters.
                EQUATION['0']['A'] >>> freq=0 amplitude (SPECIAL CASE!)
                EQUATION['1']['A'] >>> ampl1
                EQUATION['1']['omega'] >>> omega1
                EQUATION['1']['phi'] >>> phi1
                EQUATION['2']['A'] >>> ampl2
                EQUATION['2']['omega'] >>> omega2
                EQUATION['2']['phi'] >>> phi2
                etc...
        f_str (str):
            string of the generated function
        f (function):
            estimated function `f(t)` that defines water-level (generated
            by exec-ing `f_str`)

    '''
    if N_MAX_POW < 1:
        return
    msize = kwargs.get('markersize', 2)
    marker = kwargs.get('marker', 'x')
    hz2day = kwargs.get('convert2day', True)
    datetime_plot = kwargs.get('datetime_plot', None)

    # consider reading example here
    # http://www.scipy-lectures.org/intro/scipy.html#fast-fourier-transforms-scipy-fftpack
    sig_fft = fftpack.fft(sig)  # compute FFT

    # generate sampling frequencies
    sample_freq = fftpack.fftfreq(len(sig), d=timestep)
    # The signal is supposed to come from a real function so the Fourier transform will be symmetric
    # Because the resulting power is symmetric, only the positive part of the spectrum needs to be used for finding the frequency
    pidxs = np.where(
        sample_freq > 0
    )  # get indexes where sample_frequency >0, we will treat sample_freq=0 in special case
    freqs = sample_freq[pidxs]
    power = abs(sig_fft)[pidxs]

    # ---------------------------------------
    # get maximum threshold power (each frequency which has power below this value will be ignored)
    thres_power = np.sort(power)[-N_MAX_POW]
    weak_power_idxs = np.where(abs(sig_fft) < thres_power)
    sig_fft[weak_power_idxs] = 0

    # now loop over the FFT solution (already with ignored "weak frequencies") and get the curve equation params
    EQUATION = {}
    for i, complex_val in enumerate(sig_fft):
        a = abs(complex_val) / len(sig_fft)  # amplitude
        omega = sample_freq[i] * 2 * pi  # angular velocity in [rad/s]

        if complex_val == 0 or omega <= 0:  # we ignore "weak frequencies" (==0) and negative symmetrical frequencies (omega <0)
            continue

        phi = arccos(complex_val.real / abs(complex_val))
        phi *= 1 if complex_val.imag >= 0 else -1

        EQUATION['{0}'.format(i + 1)] = {}
        EQUATION['{0}'.format(
            i + 1
        )]['A'] = a * 2.  # amplitude in `sig` units (doubled because the negative symmetrical frequencies are dropped)
        EQUATION['{0}'.format(
            i + 1)]['omega'] = omega  # angular velocity in [rad/sec]
        EQUATION['{0}'.format(i + 1)]['phi'] = phi  # phase shift in [rad]

    # finally treat freq=0 special case
    EQUATION['0'] = {}
    EQUATION['0']['A'] = abs(sig_fft[np.where(sample_freq == 0)]) / len(sig)

    # now generate computation function
    def generate_sig_simplified_function(EQUATION):
        ''' EQUATION dictionary is created above
        y = H0 + A1*cos(omega1*t+phi1) + A2*cos(omega2*t+phi2) + ... + A_i*cos(omega_i*t+phi_i)
        '''
        STR = u"def generated_function(t): return (np.zeros({0}) + {1}".format(
            len(t), EQUATION['0']['A'])
        for k, v in EQUATION.items():
            if k == '0':
                continue
            STR += u" + {0}*np.cos({1}*t+{2})".format(v['A'], v['omega'],
                                                      v['phi'])
        STR += ')'
        # Python 3: exec() does not inject names into the enclosing scope,
        # so evaluate the generated source in an explicit namespace
        ns = {'np': np}
        exec(STR, ns)
        return (STR, ns['generated_function'])

    # simulate time_vector [seconds]
    t = np.arange(0, len(sig) * timestep, timestep)
    # calculate y-values with simplified equation
    f_str, f = generate_sig_simplified_function(EQUATION)
    y = f(t)

    fig = None
    if generate_plot:
        ampl = power / len(sig) * 2.  # convert power to amplitude
        freqs = np.insert(freqs, 0, 0.)  # insert special case freq==0
        ampl = np.insert(ampl, 0,
                         EQUATION['0']['A'])  # insert special case freq==0

        fig, axes = plt.subplots(2)
        ax1, ax2 = axes

        if hz2day:
            ax1.plot(freqs * 60 * 60 * 24,
                     ampl,
                     marker=marker,
                     markersize=msize)
            ax1.set_xlabel('Frequency [cycles/day]')
        else:
            ax1.plot(freqs, ampl, marker=marker, markersize=msize)
            ax1.set_xlabel('Frequency [Hz]')
        ax1.set_title('Amplitude Spectral Density')
        ax1.set_ylabel('Amplitude [m]')
        ax1.set_yscale('log')

        if datetime_plot is None:
            T = t / 3600.
            ax2.set_xlabel('Time [hours]')
        else:
            T = datetime_plot
        ax2.scatter(T,
                    sig,
                    color='g',
                    label='Original signal',
                    marker='x',
                    s=50,
                    linewidths=1.2)
        #ax2.plot(fftpack.ifft(sig_fft), 'b-.', lw=2., label='Fitted signal')
        ax2.plot(T, y, 'r-', lw=2., label='Fitted signal (simplified)')
        ax2.plot(T, y - sig, 'm--', lw=1., label='Residuals')
        ax2.set_title('Timeseries')
        ax2.set_ylabel('Water Level [m AMSL]')

        plt.legend()
        fig.tight_layout()
        if display_plot:
            fig.show()
    return (EQUATION, f_str, f, fig)
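# Usage sketch (assumption): fit a synthetic tide-like record sampled every
# 600 s and keep the two strongest frequencies.
import numpy as np

t_demo = np.arange(0, 86400 * 3, 600)                       # 3 days, 10-min step
sig_demo = 1.5 + 0.4 * np.cos(2 * np.pi * t_demo / 44714.)  # ~M2 tidal period
EQUATION, f_str, f, fig = fourier_analysis(sig_demo, timestep=600,
                                           N_MAX_POW=2, generate_plot=True)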
Esempio n. 46
0
# This script compares the plots of contiguity
# as a function of melt fraction, for 3 different
# formulations.
# Copyright Saswata Hier-Majumder, 2017
from mumap_fwd import *
import matplotlib.pylab as plt

phi1 = np.linspace(1.0e-3, 0.15)
Basalt = Poroelasticity(phi=phi1)
# The above statement assumes a default dihedral angle of 20.
# To change the dihedral angle to, say, 15,  replace the call above by
# Basalt=Poroelasticity(theta=15.0, phi=phi1)
# Notice that the WHM12 model is insensitive to dihedral angle

plt.figure(1)
#Calculate contiguity from von Bargen and Waff, 1986 (default option)
Basalt.set_contiguity()
plt.plot(Basalt.meltfrac, Basalt.Contiguity, '-r')
#Calculate contiguity from Wimert and Hier-Majumder, 2012
Basalt.set_contiguity(contiguity_model=2)
plt.plot(Basalt.meltfrac, Basalt.Contiguity, '-b')
#Calculate contiguity from Hier-Majumder et al. (2006)
Basalt.set_contiguity(contiguity_model=3)
plt.plot(Basalt.meltfrac, Basalt.Contiguity, '-g')
plt.legend(['VBW86', 'WHM12', 'HMRB06'])
plt.xlabel('Melt fraction')
plt.ylabel('Contiguity')
plt.show()
Esempio n. 47
0
    time = path.name[:8]
    if time not in rankings:
        rankings[time] = {}
    ranking = RankingSystem.read(path)
    rankings[time].update(ranking.get_mmr_all())

bots = sorted(set([bot_id for time in rankings for bot_id in rankings[time]]))
times = sorted(rankings.keys())
data = {bot: [rankings[time].get(bot) or 33 for time in times] for bot in bots}
df = pd.DataFrame.from_dict(data,
                            orient='index',
                            columns=range(1,
                                          len(times) + 1)).transpose()

print(df)

plt.figure(figsize=(10.0, 5.0))
plt.plot(df)
plt.legend(bots,
           bbox_to_anchor=(1.05, 1),
           loc='upper left',
           fontsize='small',
           ncol=2)
plt.subplots_adjust(right=0.6)
plt.title("MMR per week")
plt.xlim([1, len(times)])
plt.xlabel("Week")
plt.ylabel("MMR")
plt.grid(axis="y", linestyle=":")
plt.show()
Esempio n. 48
0
    def plot_global_fit(self,
                        one_plot=False,
                        plot_guess=True,
                        plot_fit=True,
                        save=False,
                        abs=False,
                        title=None,
                        curves=None,
                        large=False,
                        plot_color_bar=True,
                        plot_title=True,
                        xlabel=True):
        self.one_plot = one_plot
        if curves is not None:
            ind_s = [int(curve * (len(self.ns) - 1)) for curve in curves]
        else:
            ind_s = range(len(self.ns))

        for ind, item in enumerate(zip(self.datas, self.ns, self.probes)):
            if ind in ind_s:
                datas, n, probes = item
                if large:
                    offset_freq = self.CAVITY_FREQUENCY
                    scale_freq = 1e-6
                else:
                    offset_freq = \
                        self.PUMP_FREQUENCY+self.MECHANICAL_FREQUENCY
                    scale_freq = 1.
                if not one_plot:
                    self.fig_subplots = plt.figure()
                if self.fig_subplots is None:
                    if (ind == 0 and one_plot):
                        self.fig_subplots = plt.figure()
                self.probes_fit = np.linspace(np.min(self.probes),
                                              np.max(self.probes), 10000)
                if plot_fit:
                    self.plot(
                        scale_freq * (np.array(self.probes_fit) - offset_freq),
                        self.fit_func(self.probes_fit, n, self.fitted_params),
                        abs,
                        label='fit')
                if plot_guess:
                    self.plot(scale_freq *
                              (np.array(self.probes_fit) - offset_freq),
                              self.fit_func(self.probes_fit, n, self.guess()),
                              abs,
                              label='guess')
                self.plot(scale_freq * (np.array(probes) - offset_freq),
                          datas / self.normalization,
                          abs,
                          label='raw data',
                          color=plt.cm.viridis(
                              ind / len(list(zip(self.datas, self.ns)))))
                if not one_plot:
                    plt.legend()
                    if plot_title:
                        if title is None:
                            plt.suptitle(
                                r'OMIT signal, $\nu_0=${:.2f} GHz, $\nu_m=${'
                                r':.2f} '
                                r'kHz, '
                                r'n={:}'.format(
                                    self.CAVITY_FREQUENCY / 1e9,
                                    self.MECHANICAL_FREQUENCY / 1e3, int(n)))
                        else:
                            plt.suptitle(title)
                    if save:
                        if self.dir is None:
                            self.dir = self.curve.get_or_create_dir()
                        plt.savefig(osp.join(self.dir,
                                             'fit_n={:}.png'.format(int(n))),
                                    dpi=200)
                        plt.savefig(osp.join(self.dir,
                                             'fit_n={:}.pdf'.format(int(n))),
                                    dpi=200)

        if one_plot:
            if xlabel:
                if abs:
                    self.abs_ax.set_xlabel(
                        r'Detuning from $\nu_{pump}+\nu_m$ (Hz)')
                else:
                    self.im_ax.set_xlabel(
                        r'Detuning from $\nu_{pump}+\nu_m$ (Hz)')
            if not abs:
                if self.re_ax is None:
                    self.re_ax = self.fig_subplots.add_subplot(211)
                self.re_ax.set_ylabel(r'Re($\mathcal{T}$) (a.u.)')
                if self.im_ax is None:
                    self.im_ax = self.fig_subplots.add_subplot(212)
                self.im_ax.set_ylabel(r'Im($\mathcal{T}$) (a.u.)')
            else:
                if self.abs_ax is None:
                    self.abs_ax = self.fig_subplots.add_subplot(111)
                self.abs_ax.set_ylabel(r'|$\mathcal{T}$| (a.u.)')
            if title is None:
                plt.suptitle(r'$\nu_0=${:.2f} GHz, $\nu_m=${:.2f} kHz'.format(
                    self.CAVITY_FREQUENCY / 1e9,
                    self.MECHANICAL_FREQUENCY / 1e3))
            else:
                plt.suptitle(title)
            if plot_color_bar:
                self.plot_color_bar()

        if save and one_plot:
            if self.dir is None:
                self.dir = MEDIA_ROOT + time.strftime("/%Y/%m/%d",
                                                      time.gmtime())
                if not osp.exists(self.dir):
                    os.makedirs(self.dir)
            if self.filename is None:
                self.filename = 'plot.png'
            self.fig_subplots.savefig(self.dir + '/' + self.filename)
            self.fig_subplots.savefig(
                (self.dir + '/' + self.filename).replace('.png', '.pdf'))
            self.fig_subplots.savefig(osp.join(
                self.curves[0].get_or_create_dir(), 'display.png'),
                                      dpi=200)
Esempio n. 49
0
def plot_learning_curve(estimator,
                        title,
                        X,
                        y,
                        ylim=None,
                        cv=None,
                        n_jobs=-1,
                        train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum y-values plotted.

    cv : integer, cross-validation generator, optional
        If an integer is passed, it is the number of folds (defaults to 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    n_jobs : integer, optional
        Number of jobs to run in parallel (default -1, i.e. all processors).
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    plt.fill_between(train_sizes,
                     train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std,
                     alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes,
                     test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std,
                     alpha=0.1,
                     color="g")
    plt.plot(train_sizes,
             train_scores_mean,
             'o-',
             color="r",
             label="Training score")
    plt.plot(train_sizes,
             test_scores_mean,
             'o-',
             color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    return plt
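# Usage sketch (assumption): the helper returns the pyplot module, so a call
# site can chain .show() directly.
from sklearn.datasets import load_digits
from sklearn.naive_bayes import GaussianNB

X_demo, y_demo = load_digits(return_X_y=True)
plot_learning_curve(GaussianNB(), "Learning curve (GaussianNB)",
                    X_demo, y_demo, ylim=(0.7, 1.01), cv=5).show()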
lines = []  # list for plot lines for solvers and analytical solutions
legends = []  # list for legends for solvers and analytical solutions

for solver in solvers:
    line, = ax.plot([], [])
    lines.append(line)
    legends.append(solver.__name__)

line, = ax.plot([], [])  #add extra plot line for analytical solution
lines.append(line)
legends.append('Analytical')

plt.xlabel('x-coordinate [-]')
plt.ylabel('Amplitude [-]')
plt.legend(legends, loc=3, frameon=False)


# initialization function: plot the background of each frame
def init():
    for line in lines:
        line.set_data([], [])
    return lines,


# animation function.  This is called sequentially
def animate(i):
    for k, line in enumerate(lines):
        if (k == 0):
            line.set_data(x, un[i, :])
        else:
            pass  # the analytical-solution branch is truncated in the source snippet

def GraphicInterface(category, num):

    s_date = '10/1/2008'
    e_date = '11/15/2018'

    # Retrieve Auto industry: 'GM', 'F', 'TM', 'TSLA', 'HMC'
    gm = web.DataReader('GM', data_source='yahoo', start=s_date, end=e_date)
    f = web.DataReader('F', data_source='yahoo', start=s_date, end=e_date)
    tm = web.DataReader('TM', data_source='yahoo', start=s_date, end=e_date)
    tsla = web.DataReader('TSLA', data_source='yahoo', start=s_date, end=e_date)
    hmc = web.DataReader('HMC', data_source='yahoo', start=s_date, end=e_date)

    # Retrieve Bank industry: 'JPM', 'BAC', 'HSBC', 'C' (Citigroup), 'GS' (Goldman Sachs)
    jpm = web.DataReader('JPM', data_source='yahoo', start=s_date, end=e_date)
    bac = web.DataReader('BAC', data_source='yahoo', start=s_date, end=e_date)
    hsbc = web.DataReader('HSBC', data_source='yahoo', start=s_date, end=e_date)
    c = web.DataReader('C', data_source='yahoo', start=s_date, end=e_date)
    gs = web.DataReader('GS', data_source='yahoo', start=s_date, end=e_date)

    # Retrieve Retail industry: 'WMT', 'TGT', 'JCP', 'HD', 'COST'
    wmt = web.DataReader('WMT', data_source='yahoo', start=s_date, end=e_date)
    tgt = web.DataReader('TGT', data_source='yahoo', start=s_date, end=e_date)
    jcp = web.DataReader('JCP', data_source='yahoo', start=s_date, end=e_date)
    hd = web.DataReader('HD', data_source='yahoo', start=s_date, end=e_date)
    cost = web.DataReader('COST', data_source='yahoo', start=s_date, end=e_date)

    # Retrieve IT industry: 'AAPL','MSFT','AMZN','GOOG','FB','intc'
    aapl = web.DataReader('AAPL', data_source='yahoo', start=s_date, end=e_date)
    msft = web.DataReader('MSFT', data_source='yahoo', start=s_date, end=e_date)
    amzn = web.DataReader('AMZN', data_source='yahoo', start=s_date, end=e_date)
    goog = web.DataReader('GOOG', data_source='yahoo', start=s_date, end=e_date)
    fb = web.DataReader('FB', data_source='yahoo', start=s_date, end=e_date)
    intc = web.DataReader('INTC', data_source='yahoo', start=s_date, end=e_date)

    # Retrieve Fashion industry: 'TPR', 'HM-B.ST', 'GES', 'MC', 'TIF'
    tpr = web.DataReader('TPR', data_source='yahoo', start=s_date, end=e_date)
    hmb = web.DataReader('HM-B.ST', data_source='yahoo', start=s_date, end=e_date)
    ges = web.DataReader('GES', data_source='yahoo', start=s_date, end=e_date)
    mc = web.DataReader('MC', data_source='yahoo', start=s_date, end=e_date)
    tif = web.DataReader('TIF', data_source='yahoo', start=s_date, end=e_date)

    AUTO_name = ['GM', 'F', 'TM', 'TSLA', 'HMC']
    BANK_name = ['JPM', 'BAC', 'HSBC', 'C', 'GS']
    RETAIL_name = ['WMT', 'TGT', 'JCP', 'HD', 'COST']
    IT_name = ['AAPL', 'MSFT', 'AMZN', 'GOOG', 'INTC']
    FASHION_name = ['TPR', 'HM-B.ST', 'GES', 'MC', 'TIF']

    AUTO = [gm, f, tm, tsla, hmc]
    BANK = [jpm, bac, hsbc, c, gs]
    RETAIL = [wmt, tgt, jcp, hd, cost]
    IT = [aapl, msft, amzn, goog, intc]
    FASHION = [tpr, hmb, ges, mc, tif]
    
    stocks = []
    stocks_name = []
    
    # shuffle price data and ticker names together so the legend labels stay matched
    groups = {'A': (AUTO, AUTO_name),
              'B': (BANK, BANK_name),
              'I': (IT, IT_name),
              'R': (RETAIL, RETAIL_name),
              'F': (FASHION, FASHION_name)}

    for i in category:
        if str(i) not in groups:
            continue
        frames, names = groups[str(i)]
        paired = list(zip(frames, names))
        random.shuffle(paired)
        for d in range(num):
            stocks.append(paired[d][0])
            stocks_name.append(paired[d][1])
    
    # colors = ['y-', 'b-', 'g-', 'k-','r-','c-','m-']
    # fig = pylab.figure(figsize = (10,8))
    for i in range(len(stocks)):
        pylab.plot(stocks[i]['Adj Close'], linewidth=1.5)

    pylab.legend(stocks_name, loc='upper right', shadow=True)
    pylab.ylabel('Adjusted Close Price')
    pylab.title('Adjusted Close Price from 2008 to 2018')
    pylab.grid('on')
    pylab.show()
Esempio n. 52
0
def add_photometry(data, extraction):
    """
    add photometry results to website
    """

    parameters = data['parameters']
    growth_filename = '.diagnostics/curve_of_growth.png'
    fwhm_filename   = '.diagnostics/fwhm.png'

    ##### plot curve-of-growth data
    plt.subplot(211)
    plt.xlabel('Aperture Radius (px)')
    plt.ylim([-0.1,1.1])
    plt.xlim([min(parameters['aprad']), max(parameters['aprad'])])
    plt.ylabel('Fractional Combined Flux')
    if not parameters['target_only']:
        plt.errorbar(parameters['aprad'], data['background_flux'][0], 
                     data['background_flux'][1], color='black', 
                     linewidth=1, 
                     label='background objects')
    if not parameters['background_only']:
        plt.errorbar(parameters['aprad'], data['target_flux'][0], 
                     data['target_flux'][1], color='red', linewidth=1, 
                     label='target')
    plt.plot([data['optimum_aprad'], data['optimum_aprad']], 
             [plt.ylim()[0], plt.ylim()[1]], 
             linewidth=2, color='black')
    plt.plot([plt.xlim()[0], plt.xlim()[1]], 
             [data['fluxlimit_aprad'], data['fluxlimit_aprad']], 
             color='black', linestyle='--')
    plt.grid()
    plt.legend(loc=4)

    plt.subplot(212)
    plt.ylim([-0.1,1.1])
    plt.xlim([min(parameters['aprad']), max(parameters['aprad'])])
    plt.xlabel('Aperture Radius (px)')
    plt.ylabel('SNR')
    if not parameters['target_only']:
        plt.errorbar(parameters['aprad'], data['background_snr'], 
                     color='black', linewidth=1)
    if not parameters['background_only']:
        plt.errorbar(parameters['aprad'], data['target_snr'], 
                     color='red', linewidth=1)
    plt.plot([data['optimum_aprad'], data['optimum_aprad']], 
             [plt.ylim()[0], plt.ylim()[1]], 
             linewidth=2, color='black')
    plt.grid()
    plt.savefig(growth_filename, format='png')
    plt.close()
    data['growth_filename'] = growth_filename


    ##### plot fwhm as a function of time
    frame_midtimes = [frame['time'] for frame in extraction]
    fwhm = [numpy.median(frame['catalog_data']['FWHM_IMAGE'])
            for frame in extraction]
    fwhm_sig = [numpy.std(frame['catalog_data']['FWHM_IMAGE'])
                for frame in extraction]

    plt.subplot()
    plt.title('Median PSF FWHM per Frame')
    plt.xlabel('Observation Midtime (JD)')
    plt.ylabel('Point Source FWHM (px)')
    plt.scatter(frame_midtimes, fwhm, marker='o', 
                color='black')
    xrange = [plt.xlim()[0], plt.xlim()[1]]
    plt.plot(xrange, [data['optimum_aprad']*2, data['optimum_aprad']*2], 
             color='red')
    plt.xlim(xrange)
    plt.ylim([0, max([data['optimum_aprad']*2+1, max(fwhm)])])

    plt.grid()
    plt.savefig(fwhm_filename, format='png')
    plt.close()
    data['fwhm_filename'] = fwhm_filename


    ### update index.html
    html  = "<H2>Photometric Calibration - Aperture Size </H2>\n"
    html += ("optimum aperture radius derived as %5.2f (px) " + \
             "through curve-of-growth analysis based on\n") % \
        data['optimum_aprad']
    if data['n_target'] > 0 and data['n_bkg'] > 0:
        html += ("%d frames with target and %d frames with " + \
                "background detections.\n") % \
                (data['n_target'], data['n_bkg'])
    elif data['n_target'] == 0 and data['n_bkg'] > 0:
        html += "%d frames with background detections.\n" % data['n_bkg']
    elif data['n_bkg'] == 0 and data['n_target'] > 0:
        html += "%d frames with target detections.\n" % data['n_target']
    else:
        html += "no target or background detections."

    html += "<P><IMG SRC=\"%s\">\n" % data['growth_filename']
    html += "<IMG SRC=\"%s\">\n" % data['fwhm_filename']
    html += ("<P> Current strategy for finding the optimum aperture " + \
             "radius: %s\n" % data['aprad_strategy'])

    append_website(_pp_conf.index_filename, html,
                   replace_below=("<H2>Photometric Calibration" +
                                  " - Aperture Size </H2>\n"))

    return None
Esempio n. 53
0

plt.figure(1)

for V in V:  # each pass rebinds V (wind speed), which bala() presumably reads
    # time vector
    t = sp.linspace(0, 5.6, 1001)

    # start at the origin
    vi = 100 * 1000 / 3600.
    z0 = sp.array([0, 0, vi, vi])

    sol = odeint(bala, z0, t)

    x = sol[:, 0]
    y = sol[:, 1]

    plt.plot(x, y)

plt.title("Trayectoria para distintos vientos")
plt.ylabel("Y (m)")
plt.xlabel("X (m)")
plt.legend(["V = 0 m/s", "V = 10 m/s", "V = 20 m/s"])

plt.grid()

plt.axis([0, 160, 0, 50])

plt.savefig("grafico_balistica.png")
plt.show()
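# The derivative function `bala` is not shown. A plausible sketch (assumption,
# not the original): 2-D projectile with quadratic drag relative to a wind of
# speed V along x; state z = [x, y, vx, vy].
def bala_sketch(z, t, V=0.0, m=0.15, g=9.81, cd=0.004):
    x, y, vx, vy = z
    vrelx = vx - V                         # horizontal speed relative to the wind
    vnorm = (vrelx ** 2 + vy ** 2) ** 0.5  # magnitude of the relative velocity
    ax = -(cd / m) * vnorm * vrelx         # drag decelerates the relative motion
    ay = -g - (cd / m) * vnorm * vy        # gravity plus vertical drag
    return [vx, vy, ax, ay]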
Esempio n. 54
0
import numpy as np
from matplotlib import pylab as plt

import noba_model
import pymc
from pymc import MCMC
from pymc.Matplot import plot as mcplot

M = MCMC(noba_model)

M.sample(iter=2000000, burn=0, thin=10, verbose=0)
mcplot(M)

plt.hist([M.trace('intrinsic_rate')[:]], 500, label='intrinsic')
plt.hist([M.trace('social_rate')[:]], 500, label='social')
plt.legend(loc='upper left')
plt.xlim(0, 0.2)
plt.show()

plt.hist([M.trace('lag')[:]], label='lag')
plt.legend(loc='upper left')
plt.xlim(0, 5)
plt.show()

plt.hist([M.trace('dist')[:]], 100, label='dist')
plt.legend(loc='upper left')
plt.xlim(0, 200)
plt.show()

np.savetxt('distNOBA.txt', M.trace('dist')[:])
np.savetxt('lagNOBA.txt', M.trace('lag')[:])
Esempio n. 55
0
stars = []
total_reviews = []
result = ratings.countByValue()
sorted_results = collections.OrderedDict(sorted(result.items()))
for key, value in sorted_results.items():
    stars.append('%s' % (key))
    total_reviews.append('%i' % (value))
d = {'Stars': stars, 'Reviews': total_reviews}
ratings.df = pd.DataFrame(data=d, dtype='int64')
sns.barplot(ratings.df['Stars'],
            ratings.df['Reviews'],
            hue=ratings.df['Stars'])
# Top 10 Movies having most ratings
Movie_id = []
reviews_t = []
result_2 = most_movies.countByValue()
sorted_results_2 = collections.OrderedDict(sorted(result_2.items()))
for key, value in sorted_results_2.items():
    Movie_id.append('%s' % (key))
    reviews_t.append('%i' % (value))
d_2 = {'Movie_id': Movie_id, 'Reviews': reviews_t}
movie_reviews = pd.DataFrame(data=d_2, dtype='int64')
movie_sub = movie_reviews.sort_values('Reviews', ascending=False)[0:10]
sns.factorplot(x='Movie_id',
               y='Reviews',
               hue='Movie_id',
               kind='bar',
               data=movie_sub,
               size=6)
pylab.legend(loc=9, bbox_to_anchor=(0.5, -0.2), ncol=10)
Esempio n. 56
0
        yTriHOptOnDomain.append(structHOpt['maxedValue'])
        yTriHMaxOnDomain.append(structHMax['maxedValue'])
        yTriHMinOnDomain.append(structHMin['minValue'])

        #print("yTriHOptOnDomain", yTriHOptOnDomain)

    plt.title(
        "Curves obtained with %d points using the triangular kernel. \n hOpt = %.3g, hMax and hMin in [hOpt - %.3g, hOpt + %.3g]\n"
        % (nbPointsTot, hOpt, epsilon, epsilon))
    plt.xlabel("x")
    plt.ylabel("y")
    plt.plot(domain, yTriHOptOnDomain, label="RegHOpt")
    plt.plot(domain, yTriHMaxOnDomain, label="RegHMax")
    plt.plot(domain, yTriHMinOnDomain, label="RegHMin")
    plt.xlim(-5, 10)
    plt.legend(
        loc="upper right")  # loc=2, borderaxespad=0., bbox_to_anchor=(.5, 1)
    # plt.gca().set_position([0, 0, 0.8, 0.8])
    plt.show()

    # initial function

#yInitialBimodal.append(tKernelTri.dataset)
"""
print("hOpt tableau")
print(yTriHOptOnDomain)

print("taille hOpt tableau")
print(len(yTriHOptOnDomain))

print("hMax tableau")
print(yTriHMaxOnDomain)
Esempio n. 57
0
def plotprofs(simname, spcname, varunits, outtype, outfn, itime, zmax, xmax,
              hc):
    """Create a one-panel vertical profile figure of the budget rates for a defined species variable    
       at one defined time

    Args:
       simname  (str)   : ACCESS simulation name
       spcname  (str)   : name of species plotted
       varunits (str)   : units string for x-axis label
       outtype  (str)   : either 'pdf', 'png', or 'x11'
       outfn    (str)   : string for output file name
       itime    (int)   : simulation output time step number 
       zmax     (float) : height of the top of the plotted domain (m)
       xmax     (float) : maximum value on x-axis
       hc       (float) : canopy height (m)

    Returns:
       Nothing
    """
    # read elapsed hour/datetime key file
    dts, hrs = timekeys(simname)

    # get budget data for the species
    dirname = "budget"

    # constrained
    z, bad = get1Dvar(simname, dirname, spcname + "_bcn")

    # chemistry
    z, bch = get1Dvar(simname, dirname, spcname + "_bch")

    # deposition
    z, bdp = get1Dvar(simname, dirname, spcname + "_bdp")

    # emission
    z, bem = get1Dvar(simname, dirname, spcname + "_bem")

    # vertical transport
    z, bvt = get1Dvar(simname, dirname, spcname + "_bvt")

    nts = len(hrs)  # number of time slices

    # create the plot
    fig, ax = plt.subplots(1, 1, figsize=(8, 10))

    # plot the vertical profiles of budget rates at the specified time
    plt.plot(bad[:, itime],
             z,
             color=colors[0],
             linestyle="-",
             linewidth=lnwdth,
             label="cns")
    plt.plot(bch[:, itime],
             z,
             color=colors[3],
             linestyle="-",
             linewidth=lnwdth,
             label="chm")
    plt.plot(bdp[:, itime],
             z,
             color=colors[4],
             linestyle="-",
             linewidth=lnwdth,
             label="dep")
    plt.plot(bem[:, itime],
             z,
             color=colors[5],
             linestyle="-",
             linewidth=lnwdth,
             label="ems")
    plt.plot(bvt[:, itime],
             z,
             color=colors[1],
             linestyle="-",
             linewidth=lnwdth,
             label="vtx")

    # limit to specified height
    nz = len(z)
    if (zmax == -1.):
        zmax = z[nz - 1]
    plt.ylim(-0.1, zmax)
    if (xmax != -1.):
        plt.xlim(-0.1, xmax)

    # draw line showing canopy height, if applicable
    if (zmax > hc):
        ahc = [hc, hc]
        xbnds = list(ax.get_xlim())
        plt.plot(xbnds, ahc, color='0.25', linestyle='--', linewidth=lnwdth)
        plt.xlim(xbnds[0], xbnds[1])

    # set labels and title
    plt.xlabel(varunits, fontsize=xfsize, labelpad=xlabpad)
    plt.ylabel("z (m)", fontsize=yfsize, labelpad=ylabpad)
    plt.title(spcname + "-" + simname + "-" + hrs[itime] + "LT",
              fontsize=tfsize,
              y=tyloc)

    # set standard formatting
    setstdfmts(ax, tlmaj, tlmin, tlbsize, tlbpad)

    # add legend
    plt.legend(loc=4, fontsize=lfsize, bbox_to_anchor=(0.99, 0.10))

    # create output
    pltoutput(simname, outfn, outtype)

    return
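# Usage sketch (assumption): every argument value below is hypothetical.
plotprofs("run01", "ISOP", "ppbv/hr", "png", "isop_budget",
          itime=12, zmax=40., xmax=-1., hc=20.)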
Esempio n. 58
0
        semilogx(lambdas, mean_w_vs_lambda.T[:, 1:],
                 '.-')  # Don't plot the bias term
        xlabel('Regularization factor')
        ylabel('Mean Coefficient Values')
        grid()
        # You can choose to display the legend, but it's omitted for a cleaner
        # plot, since there are many attributes
        #legend(attributeNames[1:], loc='best')

        subplot(1, 2, 2)
        title('Optimal lambda: 1e{0}'.format(np.log10(opt_lambda)))
        loglog(lambdas, train_err_vs_lambda.T, 'b.-', lambdas,
               test_err_vs_lambda.T, 'r.-')
        xlabel('Regularization factor')
        ylabel('Squared error (crossvalidation)')
        legend(['Train error', 'Validation error'])
        grid()

    # To inspect the used indices, use these print statements
    #print('Cross validation fold {0}/{1}:'.format(k+1,K))
    #print('Train indices: {0}'.format(train_index))
    #print('Test indices: {0}\n'.format(test_index))

    k += 1

show()
# Display results
print('Linear regression without feature selection:')
print('- Training error: {0}'.format(Error_train.mean()))
print('- Test error:     {0}'.format(Error_test.mean()))
print('- R^2 train:     {0}'.format(
Esempio n. 59
0
def main():
    global no_words
    train_word, test_word, no_words = data_read_words()
    train_char, test_char = data_read_chars()

    char_rnn_model_LSTM_2layer_data = char_rnn_model_LSTM_2layer(
        train_char, test_char, 1)
    char_rnn_model_LSTM_2layer_gradient_clipping_data = char_rnn_model_LSTM_2layer_gradient_clipping(
        train_char, test_char, 1)
    char_rnn_model_vanilla_2layer_data = char_rnn_model_vanilla_2layer(
        train_char, test_char, 1)
    char_rnn_model_vanilla_2layer_gradient_clipping_data = char_rnn_model_vanilla_2layer_gradient_clipping(
        train_char, test_char, 1)
    word_rnn_model_LSTM_2layer_data = word_rnn_model_LSTM_2layer(
        train_word, test_word, 1)
    word_rnn_model_LSTM_2layer_gradient_clipping_data = word_rnn_model_LSTM_2layer_gradient_clipping(
        train_word, test_word, 1)
    word_rnn_model_vanilla_2layer_data = word_rnn_model_vanilla_2layer(
        train_word, test_word, 1)
    word_rnn_model_vanilla_2layer_gradient_clipping_data = word_rnn_model_vanilla_2layer_gradient_clipping(
        train_word, test_word, 1)

    accuracy_list, entropy_list = [], []
    accuracy_list.append(char_rnn_model_LSTM_2layer_data[0])
    accuracy_list.append(char_rnn_model_LSTM_2layer_gradient_clipping_data[0])
    accuracy_list.append(char_rnn_model_vanilla_2layer_data[0])
    accuracy_list.append(
        char_rnn_model_vanilla_2layer_gradient_clipping_data[0])
    accuracy_list.append(word_rnn_model_LSTM_2layer_data[0])
    accuracy_list.append(word_rnn_model_LSTM_2layer_gradient_clipping_data[0])
    accuracy_list.append(word_rnn_model_vanilla_2layer_data[0])
    accuracy_list.append(
        word_rnn_model_vanilla_2layer_gradient_clipping_data[0])

    entropy_list.append(char_rnn_model_LSTM_2layer_data[1])
    entropy_list.append(char_rnn_model_LSTM_2layer_gradient_clipping_data[1])
    entropy_list.append(char_rnn_model_vanilla_2layer_data[1])
    entropy_list.append(
        char_rnn_model_vanilla_2layer_gradient_clipping_data[1])
    entropy_list.append(word_rnn_model_LSTM_2layer_data[1])
    entropy_list.append(word_rnn_model_LSTM_2layer_gradient_clipping_data[1])
    entropy_list.append(word_rnn_model_vanilla_2layer_data[1])
    entropy_list.append(
        word_rnn_model_vanilla_2layer_gradient_clipping_data[1])

    name_list = [
        "Char RNN 2 Layer LSTM",
        "Char RNN 2 Layer LSTM w/ GC",
        "Char RNN 2 Layer Vanilla",
        "Char RNN 2 Layer Vanilla w/ GC",
        "Word RNN 2 Layer LSTM",
        "Word RNN 2 Layer LSTM w/ GC",
        "Word RNN 2 Layer Vanilla",
        "Word RNN 2 Layer Vanilla w/ GC",
    ]

    fig1 = plt.figure(figsize=(16, 8))
    for i in range(8):
        plt.plot(
            range(epochs),
            entropy_list[i],
            label="Entropy Cost for " + str(name_list[i]),
        )
    plt.xlabel("Epochs")
    plt.ylabel("Entropy Cost")
    plt.legend()
    fig1.savefig("../Out/B6c_Cost.png")

    fig2 = plt.figure(figsize=(16, 8))
    for i in range(8):
        plt.plot(
            range(epochs),
            accuracy_list[i],
            label="Test Accuracy for " + str(name_list[i]),
        )
    plt.xlabel("Epochs")
    plt.ylabel("Train Accuracy")
    plt.legend()
    fig2.savefig("../Out/B6c_Accuracy.png")

    fig3 = plt.figure(figsize=(16, 8))
    for i in range(4):
        plt.plot(
            range(epochs),
            accuracy_list[i],
            label="Test Accuracy for " + str(name_list[i]),
        )
    plt.xlabel("Epochs")
    plt.ylabel("Train Accuracy")
    plt.legend()
    fig3.savefig("../Out/B6c_Char_Accuracy.png")

    fig4 = plt.figure(figsize=(16, 8))
    for i in range(4, 8):
        plt.plot(
            range(epochs),
            accuracy_list[i],
            label="Test Accuracy for " + str(name_list[i]),
        )
    plt.xlabel("Epochs")
    plt.ylabel("Train Accuracy")
    plt.legend()
    fig4.savefig("../Out/B6c_Word_Accuracy.png")

    with open("../Out/6c.csv", "w") as f:
        f.write("type,epoch,test accuracy,entropy_cost\n")
        for i in range(8):
            for e in range(epochs):
                f.write("%s,%s,%s,%s\n" % (
                    name_list[i],
                    str(e),
                    str(accuracy_list[i][e]),
                    str(entropy_list[i][e]),
                ))
Esempio n. 60
0
def legend_(*args, **kwargs):
    kwargs.setdefault('framealpha', 0.3)
    kwargs.setdefault('fancybox', True)
    kwargs.setdefault('fontsize', rcParamsDefault['font.size'] - 2)
    pylab.legend(*args, **kwargs)
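# Usage sketch (assumption): explicit kwargs still override the softened defaults.
pylab.plot([0, 1], [0, 1], label='demo')
legend_(loc='upper left')            # picks up framealpha/fancybox/fontsize defaults
legend_(loc='best', framealpha=1.0)  # an explicit kwarg wins over setdefault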