def fit_calib_auto(X_bh, X_bg_gh, do_l1=False):
    """
Does auto-regression on the 6-DOF variables : x,y,z,r,p,y.
"""
    assert X_bh.shape[0]==X_bg_gh.shape[0]==12, "calib data has unexpected shape."
    
    #X_bh = X_bh[:,500:1000]
    #X_bg_gh = X_bg_gh[:,500:1000]
    
    axlabels = ['x','y','z','roll','pitch','yaw']
    for k in xrange(1,100, 10):
        print "order : k=", k
        plt.clf()
        W = np.empty((6, 2*k+1))
        for i in xrange(6):
            j = i+3 if i > 2 else i
            Ai, bi, W[i,:] = fit_auto(X_bh[j,:], X_bg_gh[j,:], k, do_l1)
            est = Ai.dot(W[i,:])
            print " norm err : ", np.linalg.norm(bi - est)
    
            plt.subplot(3,2,i+1)
            plt.plot(bi, label='pr2')
            plt.plot(X_bh[j,k-1:], label='hydra')
            plt.plot(est, label='estimate')
            plt.ylabel(axlabels[i])
            plt.legend()
        plt.show()
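
fit_auto itself is not included in this excerpt. As a rough illustration only, a least-squares fit with 2k+1 weights (k lagged samples of each signal plus a bias) might look like the sketch below; the real fit_auto's indexing and its do_l1 branch may differ.

import numpy as np

def fit_auto_sketch(x_src, x_dst, k, do_l1=False):
    # Regress x_dst[t] on x_src[t-k+1 : t+1] and the strictly-past
    # window x_dst[t-k : t], plus a bias term: 2k+1 weights in total.
    T = len(x_src)
    A = np.ones((T - k, 2 * k + 1))
    b = np.empty(T - k)
    for row, t in enumerate(range(k, T)):
        A[row, :k] = x_src[t - k + 1:t + 1]
        A[row, k:2 * k] = x_dst[t - k:t]
        b[row] = x_dst[t]
    w = np.linalg.lstsq(A, b, rcond=None)[0]  # do_l1 would swap in an L1 solver
    return A, b, w
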
Example #2
File: vksz.py Project: amanzotti/vksz
def make_corr1d_fig(dosave=False):
    corr = make_corr_both_hemi()
    lw=2; fs=16
    pl.figure(1)#, figsize=(8, 7))
    pl.clf()
    pl.xlim(4,300)
    pl.ylim(-400,+500)    
    lambda_titles = [r'$20 < \lambda < 30$',
                     r'$30 < \lambda < 40$',
                     r'$\lambda > 40$']
    colors = ['blue','green','red']
    for i in range(3):
        corr1d, rcen = corr_1d_from_2d(corr[i])
        ipdb.set_trace()
        pl.semilogx(rcen, corr1d*rcen**2, lw=lw, color=colors[i])
        #pl.semilogx(rcen, corr1d*rcen**2, 'o', lw=lw, color=colors[i])
    pl.xlabel(r'$s (Mpc)$',fontsize=fs)
    pl.ylabel(r'$s^2 \xi_0(s)$', fontsize=fs)    
    pl.legend(lambda_titles, loc='lower left', fontsize=fs+3)
    pl.plot([.1,10000],[0,0],'k--')
    s_bao = 149.28
    pl.plot([s_bao, s_bao],[-9e9,+9e9],'k--')
    pl.text(s_bao*1.03, 420, 'BAO scale')
    pl.text(s_bao*1.03, 370, '%0.1f Mpc'%s_bao)
    if dosave: pl.savefig('xi1d_3bin.pdf')
Example #3
File: vksz.py Project: amanzotti/vksz
def study_sdss_density(hemi='south'):
    grid = grid3d(hemi=hemi)
    n_data = num_sdss_data_both_catalogs(hemi, grid)
    n_rand, weight = num_sdss_rand_both_catalogs(hemi, grid)
    n_rand *= ((n_data*weight).sum() / (n_rand*weight).sum())
    delta = (n_data - n_rand) / n_rand
    delta[weight==0]=0.
    fdelta = np.fft.fftn(delta*weight)
    power = np.abs(fdelta)**2.
    ks = get_wavenumbers(delta.shape, grid.reso_mpc)
    kmag = ks[3]
    kbin = np.arange(0,0.06,0.002)
    ind = np.digitize(kmag.ravel(), kbin)
    power_ravel = power.ravel()
    power_bin = np.zeros_like(kbin)
    for i in range(len(kbin)):
        print i
        wh = np.where(ind==i)[0]
        power_bin[i] = power_ravel[wh].mean()
    #pl.clf()
    #pl.plot(kbin, power_bin)
    from cosmolopy import perturbation
    pk = perturbation.power_spectrum(kbin, 0.4, **cosmo)
    pl.clf(); pl.plot(kbin, power_bin/pk, 'b')
    pl.plot(kbin, power_bin/pk, 'bo')    
    pl.xlabel('k (1/Mpc)',fontsize=16)
    pl.ylabel('P(k) ratio, DATA/THEORY [arb. norm.]',fontsize=16)
    ipdb.set_trace()
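
The per-bin averaging loop above can also be written without an explicit Python loop; a small sketch using np.bincount, assuming the same ind, power and kbin arrays as above (empty bins come out as nan):

counts = np.bincount(ind, minlength=len(kbin) + 1).astype(float)
sums = np.bincount(ind, weights=power.ravel(), minlength=len(kbin) + 1)
with np.errstate(invalid='ignore'):
    power_bin = (sums / counts)[:len(kbin)]
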
Example #4
def plot_call_rate(c):
    # Histogram
    P.clf()
    P.figure(1)
    P.hist(c[:,1], normed=True)
    P.xlabel('Call Rate')
    P.ylabel('Portion of Variants')
    P.savefig(os.environ['OBER'] + '/doc/imputation/cgi/call_rate.png')

####################################################################################
#if __name__ == '__main__':
#    # Input parameters
#    file_name = sys.argv[1]  # Name of data file with MAF, call rates
#
#    # Load data
#    c = np.loadtxt(file_name, dtype=np.float16)
#
#    # Breakdown by call rate (proportional to the #samples, 1415)
#    plot_call_rate(c)
#    h = np.histogram(c[:,1])
#    a = np.flipud(np.cumsum(np.flipud(h[0])))/float(c.shape[0])
#    print np.concatenate((h[1][:-1][newaxis].transpose(), a[newaxis].transpose()), axis=1)

    # Breakdown by minor allele frequency
    maf_n = 20
    maf_bins = np.linspace(0, 0.5, maf_n + 1)
    maf_bin = np.digitize(c[:,0], maf_bins)
    d = c.astype(np.float64)
    mean_call_rate = np.array([(1.*np.mean(d[maf_bin == i,1])) for i in xrange(len(maf_bins))])

    P.figure(2)
    h = (maf_bins[-1] - maf_bins[0]) / maf_n  # bar width = one MAF bin
    P.bar(maf_bins - h, mean_call_rate, width=h)
    P.savefig(os.environ['OBER'] + '/doc/imputation/cgi/call_rate_maf.png')
Example #5
File: vksz.py Project: amanzotti/vksz
def plot_many_corr_delta_vel():
    pl.clf()
    leg = []
    for kmax in [0.05, 0.1, 0.2, 0.5, 1., 2., 5.]:
        plot_corr_delta_vel(kmin=1e-3, kmax=kmax, doclf=False)
        leg.append('kmax=%0.2f'%kmax)
    pl.legend(leg)
Example #6
File: vksz.py Project: amanzotti/vksz
def study_redmapper_2d():
    # I just want to know the typical angular separation for RM clusters.
    # I'm going to do this in a lazy way.
    hemi = 'north'
    rm = load_redmapper(hemi=hemi)
    ra = rm['ra']
    dec = rm['dec']
    ncl = len(ra)
    dist = np.zeros((ncl, ncl))
    for i in range(ncl):
        this_ra = ra[i]
        this_dec = dec[i]
        dra = this_ra-ra
        ddec = this_dec-dec
        dxdec = dra*np.cos(this_dec*np.pi/180.)
        dd = np.sqrt(dxdec**2. + ddec**2.)
        dist[i,:] = dd
        dist[i,i] = 99999999.
    d_near_arcmin = dist.min(0)*60.
    pl.clf(); pl.hist(d_near_arcmin, bins=100)
    pl.title('Distance to Nearest Neighbor for RM clusters')
    pl.xlabel('Distance (arcmin)')
    pl.ylabel('N')
    fwhm_planck_217 = 5.5 # arcmin
    sigma = fwhm_planck_217/2.355
    frac_2sigma = 1.*len(np.where(d_near_arcmin>2.*sigma)[0])/len(d_near_arcmin)
    frac_3sigma = 1.*len(np.where(d_near_arcmin>3.*sigma)[0])/len(d_near_arcmin)
    print '%0.3f percent of RM clusters are separated by 2-sigma_planck_beam'%(100.*frac_2sigma)
    print '%0.3f percent of RM clusters are separated by 3-sigma_planck_beam'%(100.*frac_3sigma)    
    ipdb.set_trace()
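
The O(ncl**2) Python loop above can be vectorized with broadcasting; a sketch assuming the same flat-sky small-angle approximation and the ra/dec arrays already loaded:

dra = ra[:, None] - ra[None, :]
ddec = dec[:, None] - dec[None, :]
dxdec = dra * np.cos(dec[:, None] * np.pi / 180.)
dist = np.sqrt(dxdec**2 + ddec**2)
np.fill_diagonal(dist, 99999999.)
d_near_arcmin = dist.min(0) * 60.
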
Example #7
File: vksz.py Project: amanzotti/vksz
def study_multiband_planck(quick=True):
    savename = datadir+'cl_multiband.pkl'
    bands = [100, 143, 217, 'mb']
    if quick: cl = pickle.load(open(savename,'r'))
    else:
        cl = {}
        mask = load_planck_mask()
        mask_factor = np.mean(mask**2.)
        for band in bands:
            this_map = load_planck_data(band)
            this_cl = hp.anafast(this_map*mask, lmax=lmax)/mask_factor
            cl[band] = this_cl
        pickle.dump(cl, open(savename,'w'))


    cl_theory = {}
    pl.clf()
    
    for band in bands:
        l_theory, cl_theory[band] = get_cl_theory(band)
        this_cl = cl[band]
        pl.plot(this_cl/cl_theory[band])
        
    pl.legend(bands)
    pl.plot([0,4000],[1,1],'k--')
    pl.ylim(.7,1.3)
    pl.ylabel('data/theory')
Example #8
def make_plot_mG_nH(mgrpArr_all, denArr_all, over_density2, fignamestr, ngrp=15):
	fig = plt.figure(3)
	plt.clf()
	ax = fig.add_subplot(111)

	idArr, bins = pTdf.group_by_prop(mgrpArr_all, ngrp=ngrp, takelog=True, fixed_interval=True)
	ngrp = len(bins) - 1
	bins = 10.**bins
		
	f_nHArr = N.zeros(len(bins)-1)

	rho_mean = cTdf.mean_hydrogen_numden_z(simI["zcol"])
	for igrp in xrange(ngrp):
		isame = N.where(idArr == igrp)[0]
		print "ptcls in [%.2e]: %d" % (bins[igrp], len(isame))
		if len(isame) > 0:
			ihigh = N.where(denArr_all[isame, izcol] >= over_density2*rho_mean)[0]
			f_nHArr[igrp] = 1.*len(ihigh)/(1.*len(isame))

	print "x({:d}): {:}".format(len(bins[:-1]+bins[1:]), (bins[:-1]+bins[1:])/2.)
	print "y({:d}): {:}".format(len(f_nHArr), f_nHArr)

	ax.plot((bins[:-1]+bins[1:])/2., f_nHArr)
	ax.set_xlabel(r"$M_h$") 
	ax.set_ylabel(r"$f_{n_H > %d \bar{n_H}}(%.1f)$" % (over_density2, simI["zcol"]))
	ax.set_xscale("log")
	ax.set_xlim(bins[0], bins[-1]); ax.set_ylim(0., 1.1)
	#plt.setp(ax.get_xticklabels(), fontsize=fs["tc"])
	#plt.setp(ax.get_yticklabels(), fontsize=fs["tc"])

	figname = "mgrp-fnH_%s_ov%d_vr%d.%s" % (fignamestr[1], int(over_density2), simI["virial_radius"], figext)
	plt.savefig(os.path.join(fignamestr[2], figname))
Example #9
File: vksz.py Project: amanzotti/vksz
def study_redmapper_lrg_3d(hemi='north'):
    # create 3d grid object
    grid = grid3d(hemi=hemi)
    
    # load SDSS data
    sdss = load_sdss_data_both_catalogs(hemi)
    
    # load redmapper catalog
    rm = load_redmapper(hemi=hemi)
    
    # get XYZ positions (Mpc) of both datasets
    x_sdss, y_sdss, z_sdss = grid.xyz_from_radecz(sdss['ra'], sdss['dec'], sdss['z'], applyzcut=False)
    x_rm, y_rm, z_rm = grid.xyz_from_radecz(rm['ra'], rm['dec'], rm['z_spec'], applyzcut=False)
    pos_sdss = np.vstack([x_sdss, y_sdss, z_sdss]).T
    pos_rm = np.vstack([x_rm, y_rm, z_rm]).T

    # build a couple of KDTree's, one for SDSS, one for RM.
    from sklearn.neighbors import KDTree
    tree_sdss = KDTree(pos_sdss, leaf_size=30)
    tree_rm = KDTree(pos_rm, leaf_size=30)

    lrg_counts = tree_sdss.query_radius(pos_rm, 100., count_only=True)
    pl.clf()
    pl.hist(lrg_counts, bins=50)
    
    
    ipdb.set_trace()
Example #10
def Animate(g):
    for i in range(1,64,5):
        pylab.clf()
        x= g[0:i,:,:]
        y= numarray.sum(x, axis=0)
        pylab.matshow( y)
        pylab.savefig("temp/3dturb-%03i.png" % i)
Example #11
def plot_mock(mock):
    plt.clf()
    plt.plot(mock['dates'], mock['y'], marker='+', color='blue',
             label='data', markersize=9)
    plt.plot(mock['dates'], mock['y_without_seasonal'],
             color='green', alpha=0.6, linewidth=1,
             label='model without seasonal')
Example #12
def plot_feat_hist(data_name_list, filename=None):
    pylab.clf()
    num_rows = 1 + (len(data_name_list) - 1) / 2
    num_cols = 1 if len(data_name_list) == 1 else 2
    pylab.figure(figsize=(5 * num_cols, 4 * num_rows))

    for i in range(num_rows):
        for j in range(num_cols):
            pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
            x, name = data_name_list[i * num_cols + j]
            pylab.title(name)
            pylab.xlabel('Value')
            pylab.ylabel('Density')
            # the histogram of the data
            max_val = np.max(x)
            if max_val <= 1.0:
                bins = 50
            elif max_val > 50:
                bins = 50
            else:
                bins = max_val
            n, bins, patches = pylab.hist(
                x, bins=bins, normed=1, facecolor='green', alpha=0.75)

            pylab.grid(True)

    if not filename:
        filename = "feat_hist_%s.png" % name

    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
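
A hypothetical call with synthetic data (names and data here are illustrative; assumes the Python 2 / old-matplotlib environment these examples target and an existing CHART_DIR):

import numpy as np

data = [(np.random.beta(2, 5, 1000), 'beta(2,5)'),
        (np.random.randint(1, 30, 1000), 'integer counts')]
plot_feat_hist(data, filename='feat_hist_demo.png')
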
Example #13
def PlotTurbulenceIllustr(a):

    """
    Can generate the grid with
    g=kolmogorovutils.GenerateKolmogorov3D( 1025, 129, 129)
    a=kolmogorovutils.GridToNumarray(g)

    """

    for x in [1,10,100]:

        suba= numarray.sum(a[:,:,0:x], axis=2)

        suba.transpose()
        pylab.clf()
        pylab.matshow(suba)
        pylab.savefig("temp/turb3d-sum%03i.eps" % x)

    for x in [1,10,100]:

        for j in [0,1,2]:

            suba= numarray.sum(a[:,:200,j*x:(j+1)*x], axis=2)

            suba.transpose()
            pylab.clf()
            pylab.matshow(suba)
            pylab.savefig("temp/turb3d-sum%03i-s%i.eps" % (x,j))        
Example #14
def get_histogram_scale(distances_dict, nbins):
    """Draws histogram to outfile_name.
    """
    scale_dict = defaultdict(list)
    #draw histograms
    for d_dict in distances_dict.values():
        for i, (field, data) in enumerate(d_dict.items()):
            if len(data) < 1:
                continue
            histogram = hist(data,bins=nbins)
            
            fig  = gcf()
            axis = fig.gca()

            #get height scale: y/x
            ymin,ymax = axis.get_ylim()
        
            xmin,xmax = axis.get_xlim()
            scale_dict['ymin'].append(ymin)
            scale_dict['ymax'].append(ymax)
            scale_dict['xmin'].append(xmin)
            scale_dict['xmax'].append(xmax)

            clf()
    
    yscale = (min(scale_dict['ymin']),max(scale_dict['ymax']))
    xscale = (min(scale_dict['xmin']),max(scale_dict['xmax']))
    
    return xscale,yscale
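
A hypothetical input to show the expected shape (an outer dict of per-sample dicts mapping field name to a flat list of distances; the names here are illustrative):

import numpy as np

distances_dict = {'sample1': {'unweighted': list(np.random.rand(200)),
                              'weighted': list(np.random.rand(200))}}
xscale, yscale = get_histogram_scale(distances_dict, nbins=20)
# xscale and yscale are the (min, max) envelopes across all histograms
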
Example #15

    def plot(self, bit_stream):
        if self.previous_bit_stream != bit_stream.to_list():
            self.previous_bit_stream = bit_stream

            x = []
            y = []
            bit = None

            for bit_time in bit_stream.to_list():
                if bit is None:
                    x.append(bit_time)
                    y.append(0)
                    bit = 0
                elif bit == 0:
                    x.extend([bit_time, bit_time])
                    y.extend([0, 1])
                    bit = 1
                elif bit == 1:
                    x.extend([bit_time, bit_time])
                    y.extend([1, 0])
                    bit = 0

            plt.clf()
            plt.plot(x, y)
            plt.xlim([0, 10000])
            plt.ylim([-0.1, 1.1])
            plt.show()
            plt.pause(0.005)
Example #16
File: validation.py Project: j-dr/ADDHALOS
def plotFeaturePDF(ift, pft, outbase, fmin=0.0, fmax=1.0, fstep=0.01):
    """
    Plot a comparison between the input feature distribution and the 
    feature distribution of the predicted halos
    """
    plt.clf()
    nfbins = int(round(( fmax - fmin ) / fstep))
    fbins = np.logspace( fmin, fmax, nfbins )
    fcen = ( fbins[:-1] + fbins[1:] ) / 2

    plt.xscale( 'log', nonposx='clip' )
    plt.yscale( 'log', nonposy='clip' )
    
    ic, e, p = plt.hist( ift, fbins, label='Original Halos', alpha=0.5, normed=True )
    pc, e, p = plt.hist( pft, fbins, label='Added Halos', alpha=0.5, normed=True )

    plt.legend()
    plt.xlabel( r'$\delta$' )
    plt.savefig( outbase+'_fpdf.png' )

    fdtype = np.dtype( [ ('fcen', float), ('ifcounts', float), ('pfcounts', float) ] )
    fd = np.ndarray( len(fcen), dtype = fdtype )
    fd[ 'fcen' ] = fcen
    fd[ 'ifcounts' ] = ic
    fd[ 'pfcounts' ] = pc

    fitsio.write( outbase+'_fpdf.fit', fd )
Example #17
File: validation.py Project: j-dr/ADDHALOS
def plotMassFunction(im, pm, outbase, mmin=9, mmax=13, mstep=0.05):
    """
    Make a comparison plot between the input mass function and the 
    predicted projected correlation function
    """
    plt.clf()

    nmbins = int(round(( mmax - mmin ) / mstep))
    mbins = np.logspace( mmin, mmax, nmbins )
    mcen = ( mbins[:-1] + mbins[1:] ) /2
    
    plt.xscale( 'log', nonposx = 'clip' )
    plt.yscale( 'log', nonposy = 'clip' )
    
    ic, e, p = plt.hist( im, mbins, label='Original Halos', alpha=0.5, normed = True)
    pc, e, p = plt.hist( pm, mbins, label='Added Halos', alpha=0.5, normed = True)
    
    plt.legend()
    plt.xlabel( r'$M_{vir}$' )
    plt.ylabel( r'$\frac{dN}{dM}$' )
    #plt.tight_layout()
    plt.savefig( outbase+'_mfcn.png' )
    
    mdtype = np.dtype( [ ('mcen', float), ('imcounts', float), ('pmcounts', float) ] )
    mf = np.ndarray( len(mcen), dtype = mdtype )
    mf[ 'mcen' ] = mcen
    mf[ 'imcounts' ] = ic
    mf[ 'pmcounts' ] = pc

    fitsio.write( outbase+'_mfcn.fit', mf )
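
A hypothetical call (synthetic masses; note the bins are built with np.logspace, so im and pm should be linear masses spanning 10**mmin to 10**mmax; assumes fitsio is importable and the output location is writable):

import numpy as np

im = 10 ** np.random.uniform(9, 13, 5000)   # "original" halo masses
pm = 10 ** np.random.uniform(9, 13, 5000)   # "predicted" halo masses
plotMassFunction(im, pm, 'demo', mmin=9, mmax=13, mstep=0.05)
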
Example #18
def make_l1tf_mock(doplot=doplot, period=6, sea_amp=0.05, noise=0.0):
    np.random.seed(3733)
    num = 100
    x = np.arange(num)
    y = x * 0.0
    y[0:20] = 20.0 + x[0:20] * 1.5
    y[20:50] = y[19] - (x[20:50] - x[19]) * 0.2
    y[50:60] = y[49] + (x[50:60] - x[49]) * 0.47
    y[60:75] = y[59] - (x[60:75] - x[59]) * 2.4
    y[75:] = y[74] + (x[75:] - x[74]) * 2.0
    y = y / y.max()
    y = y + noise * np.random.randn(num)
    if period > 0:
        seas = np.random.randn(period) * sea_amp
        seas_lookup = {k: v for k, v in enumerate(seas)}
        seasonal_part = np.array([seas_lookup[i % period] for i in x])
        seasonal_part = seasonal_part - seasonal_part.mean()
        y_with_seasonal = y + seasonal_part
    else:
        y_with_seasonal = y

    if doplot:
        plt.clf()
        lab = "True, period=%s" % period
        plt.plot(x, y, marker="o", linestyle="-", label=lab, markersize=8, alpha=0.3, color="blue")
        lab = "True + seasonality, period=%s" % period
        plt.plot(x, y_with_seasonal, marker="o", linestyle="-", label=lab, markersize=8, alpha=0.3, color="red")

    np.random.seed(None)
    return {"x": x, "y": y, "y_with_seasonal": y_with_seasonal, "seas_lookup": seas_lookup}
Example #19
def test_l1_fit_rand_with_permissive(
    beta_d2=4.0, beta_d1=1.0, beta_seasonal=1.0, beta_step=2.5, period=12, noise=0, seed=3733, doplot=True, sea_amp=0.05
):
    # print "seed=%s,noise=%s,beta_d2=%s,beta_d1=%s,beta_step=%s," \
    #      "beta_seasonal=%s" % (seed,noise,beta_d2,beta_d1,beta_step,beta_seasonal)

    mock = make_l1tf_mock2(noise=noise, seed=seed, sea_amp=sea_amp)
    y = mock["y_with_seasonal"]
    xx = mock["x"]

    step_permissives = [(30, 0.5)]
    sol = l1_fit(
        xx,
        y,
        beta_d2=beta_d2,
        beta_d1=beta_d1,
        beta_seasonal=beta_seasonal,
        beta_step=beta_step,
        period=period,
        step_permissives=step_permissives,
    )

    if doplot:
        plt.clf()
        plt.plot(xx, y, linestyle="-", marker="o", markersize=4)
        plt.plot(xx, sol["xbase"], label="base")
        plt.plot(xx, sol["steps"], label="steps")
        plt.plot(xx, sol["seas"], label="seasonal")
        plt.plot(xx, sol["x"], label="full")
        plt.legend(loc="upper left")
Example #20
def plotRocCurves(file_legend):
	pylab.clf()
	pylab.figure(1)
	pylab.xlabel('1 - Specificity', fontsize=12)
	pylab.ylabel('Sensitivity', fontsize=12)
	pylab.title("Need for Referral")
	pylab.grid(True, which='both')
	pylab.xticks([i/10.0 for i in range(1,11)])
	pylab.yticks([i/10.0 for i in range(0,11)])
	pylab.tick_params(axis="both", labelsize=15)

	for file, legend in file_legend:
		points = open(file,"rb").readlines()
		x = [float(p.split()[0]) for p in points]
		y = [float(p.split()[1]) for p in points]
		dev = [float(p.split()[2]) for p in points]
		x = [0.0] + x
		y = [0.0] + y
		dev = [0.0] + dev
	
		auc = np.trapz(y, x) * 100
		aucDev = np.trapz(dev, x) * 100

		pylab.grid()
		pylab.errorbar(x, y, yerr = dev, fmt='-')
		pylab.plot(x, y, '-', linewidth = 1.5, label = legend + u" (AUC = {0:0.1f}% \xb1 {1:0.1f}%)".format(auc,aucDev))

	pylab.legend(loc = 4, borderaxespad=0.4, prop={'size':12})
	pylab.savefig("referral/referral-curves.pdf", format='pdf')
Example #21
File: plots.py Project: orenlivne/ober
def plot_experiment_stats(e):
    sample_data = np.where(e.num_test_genotypes(SAMPLE) > 0)[0]
    c_sample = (100.0 * e.called(SAMPLE)[sample_data]) / e.num_test_genotypes(SAMPLE)[sample_data] + 1e-15
    fill = 100.*e.fill[sample_data]

    snp_data = np.where(e.num_test_genotypes(SNP) > 0)[0]
    c_snp = (100.0 * e.called(SNP)[snp_data]) / e.num_test_genotypes(SNP)[snp_data]
    
    # Call % vs. fill %
    P.figure(1);
    P.clf();
    P.plot(fill, c_sample, 'o')
    P.xlabel('Fill %')
    P.ylabel('Call %')
    P.title('Validation Breakdown by Sample, %.2f%% Deleted. r = %.2f' % 
              (100.0 * e.fraction, np.corrcoef(fill + SMALL_FLOAT, c_sample + SMALL_FLOAT)[0, 1],))

    # Call % vs. SNP
    P.figure(2);
    P.clf();
    P.plot(snp_data, c_snp, 'o')
    P.xlabel('SNP #')
    P.ylabel('Call %')
    P.title('Validation Breakdown by SNP, %.2f%% Deleted' % (100.0 * e.fraction,))
    
    return (np.array([snp_data, c_snp]).transpose(),
            np.array([sample_data, c_sample, fill]).transpose())
Example #22
File: analysis.py Project: icshih/Miosotys
 def psd(self, lc_data, n=1024, save=False):
     flux = lc_data[:,1]
     # check NaN value, and reset it to zero
     nani = np.where(np.isnan(flux) > 0)
     flux[nani] = 0.0
     
     thz = np.fft.fftfreq(n, d=self.exposuretime)
     fre = []
     for index in range(len(flux)/n):
         temp = np.fft.fft(flux[index*n:(index + 1)*n])
         fre.append(temp[1:n/2])
     fre_array = np.asarray(fre)
     avg_fre_array = np.average(fre_array, axis=0)
     power = (np.abs(avg_fre_array)**2)/thz[1:n/2]
     
     plt.clf()
     ps = plt.gca()    
     ps.plot(thz[1:n/2], power, 'k.-')
     ps.set_xscale('log')
     ps.set_yscale('log')
     ps.set_xlabel('Frequency (Hz)')
     ps.set_ylabel(r'PSD (power$^2$/frequency)')
     ps.set_title('PSD: ' + op.basename(self.filename) + r', n=%4d, $\Delta$t=%4.2f sec' % (n, self.exposuretime))
     plt.show()
     
     if save == 1:
         save_filename = 'f' + str(self.fibre) + '_' + op.basename(self.filename)[:-5] + '_psd.png'
         plt.savefig(save_filename, format='png')
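
The core of the method is Bartlett-style segment averaging (here of the complex spectra, as in the code above); a standalone sketch of the same computation for a plain flux array with sampling interval dt:

import numpy as np

def psd_sketch(flux, n=1024, dt=0.1):
    flux = np.nan_to_num(np.asarray(flux, dtype=float))  # NaN -> 0, as above
    thz = np.fft.fftfreq(n, d=dt)
    segs = [np.fft.fft(flux[i * n:(i + 1) * n])[1:n // 2]
            for i in range(len(flux) // n)]
    avg = np.average(np.asarray(segs), axis=0)
    return thz[1:n // 2], np.abs(avg)**2 / thz[1:n // 2]
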
Example #23
def plot_average(filenames, save_plot=True, show_plot=False, dpi=100):

    ''' Plot Signal average from a list of averaged files. '''

    fname = get_files_from_list(filenames)

    # plot averages
    pl.ioff()  # switch off (interactive) plot visualisation
    factor = 1e15
    for fnavg in fname:
        name = fnavg[0:len(fnavg) - 4]
        basename = os.path.splitext(os.path.basename(name))[0]
        print fnavg
        # mne.read_evokeds provides a list or a single evoked based on condition.
        # here we assume only one evoked is returned (requires further handling)
        avg = mne.read_evokeds(fnavg)[0]
        ymin, ymax = avg.data.min(), avg.data.max()
        ymin *= factor * 1.1
        ymax *= factor * 1.1
        fig = pl.figure(basename, figsize=(10, 8), dpi=100)
        pl.clf()
        pl.ylim([ymin, ymax])
        pl.xlim([avg.times.min(), avg.times.max()])
        pl.plot(avg.times, avg.data.T * factor, color='black')
        pl.title(basename)

        # save figure
        fnfig = os.path.splitext(fnavg)[0] + '.png'
        pl.savefig(fnfig, dpi=dpi)

    pl.ion()  # switch on (interactive) plot visualisation
Example #24
 def plot(self,key='Re'):
     """
     Create a plot of a variable over the ORACLES study area. 
     
     Parameters
     ----------
     key : string
     See names for available datasets to plot.
     
     clf : boolean
     If True, clear off pre-existing figure. If False, plot over pre-existing figure.
     
     Modification history
     --------------------
     Written: Michael Diamond, 08/16/2016, Seattle, WA
     Modified: Michael Diamond, 08/21/2016, Seattle, WA
        -Added ORACLES routine flight plan, Walvis Bay (orange), and Ascension Island
     Modified: Michael Diamond, 09/02/2016, Swakopmund, Namibia
          -Updated flight track
     """
     plt.clf()
     size = 16
     font = 'Arial'
     m = Basemap(llcrnrlon=self.lon.min(),llcrnrlat=self.lat.min(),urcrnrlon=self.lon.max(),\
     urcrnrlat=self.lat.max(),projection='merc',resolution='i')
     m.drawparallels(np.arange(-180,180,5),labels=[1,0,0,0],fontsize=size,fontname=font)
     m.drawmeridians(np.arange(0,360,5),labels=[1,1,0,1],fontsize=size,fontname=font)
     m.drawmapboundary(linewidth=1.5)        
     m.drawcoastlines()
     m.drawcountries()
     if key == 'Pbot' or key == 'Ptop' or key == 'Nd' or key == 'DZ': 
         m.drawmapboundary(fill_color='steelblue')
         m.fillcontinents(color='floralwhite',lake_color='steelblue',zorder=0)
     else: m.fillcontinents('k',zorder=0)
     if key == 'Nd':
         m.pcolormesh(self.lon,self.lat,self.ds['%s' % key],cmap=self.colors['%s' % key],\
         latlon=True,norm = LogNorm(vmin=self.v['%s' % key][0],vmax=self.v['%s' % key][1]))
     elif key == 'Zbf' or key == 'Ztf':
         levels = [0,250,500,750,1000,1250,1500,1750,2000,2500,3000,3500,4000,5000,6000,7000,8000,9000,10000]
         m.contourf(self.lon,self.lat,self.ds['%s' % key],levels=levels,\
         cmap=self.colors['%s' % key],latlon=True,extend='max')
     elif key == 'DZ':
         levels = [0,500,1000,1500,2000,2500,3000,3500,4000,4500,5000,5500,6000,6500,7000]
         m.contourf(self.lon,self.lat,self.ds['%s' % key],levels=levels,\
         cmap=self.colors['%s' % key],latlon=True,extend='max')
     else:
         m.pcolormesh(self.lon,self.lat,self.ds['%s' % key],cmap=self.colors['%s' % key],\
         latlon=True,vmin=self.v['%s' % key][0],vmax=self.v['%s' % key][1])
     cbar = m.colorbar()
     cbar.ax.tick_params(labelsize=size-2) 
     cbar.set_label('[%s]' % self.units['%s' % key],fontsize=size,fontname=font)
     if key == 'Pbot' or key == 'Ptop': cbar.ax.invert_yaxis() 
     m.scatter(14.5247,-22.9390,s=250,c='orange',marker='D',latlon=True)
     m.scatter(-14.3559,-7.9467,s=375,c='c',marker='*',latlon=True)
     m.scatter(-5.7089,-15.9650,s=375,c='chartreuse',marker='*',latlon=True)
     m.plot([14.5247,13,0],[-22.9390,-23,-10],c='w',linewidth=5,linestyle='dashed',latlon=True)
     m.plot([14.5247,13,0],[-22.9390,-23,-10],c='k',linewidth=3,linestyle='dashed',latlon=True)
     plt.title('%s from MSG SEVIRI on %s/%s/%s at %s UTC' % \
     (self.names['%s' % key],self.month,self.day,self.year,self.time),fontsize=size+4,fontname=font)
     plt.show()
Example #25
def plotLSQ(C_ms,lsqSc,lsqMu,lsqSl,LSQ,viewDirectory,TextSize=16):
  C_MS = C_ms - 1.0
  #480x480
  myfile = os.path.join(viewDirectory,'lsq.png')
  title='Cumulative LSQ Penalties' 
  red = 'S(Q,E)'
  green = 'high E scatter'
  blue = 'slope of high E scatter'
  xlabel = r"$C_{ms}$ [unitless]"
  ylabel = "Cumulative Penalty [unitless]"
  pylab.clf()
  pylab.plot( C_MS, lsqSc, 'r-', linewidth=5 )
  pylab.plot( C_MS, lsqMu, 'g-', linewidth=5 )
  pylab.plot( C_MS, lsqSl, 'b-', linewidth=5 )
  pylab.grid( 1 )
  pylab.legend( (red, green, blue), loc="upper left" )
  pylab.xlabel( xlabel )
  pylab.ylabel( ylabel )
  pylab.title(title)
  pylab.savefig( myfile )
  
  myfile = os.path.join(viewDirectory,'lsq_f.png')
  pylab.clf()
  title='Final Cumulative LSQ Penalty'
  pylab.plot( C_MS,LSQ, 'b-', linewidth = 5)
  pylab.xlabel( xlabel )
  pylab.ylabel( ylabel )
  pylab.title( title )
  pylab.grid(1)
  pylab.savefig( myfile )
  return
Example #26
    def plot_df(self,show=False):
        from matplotlib import pylab as plt 

        if self.afp is None:
            print 'afp not initialized. call update afp'
            return -1

        linecords,td,df,rtn,minmaxy = self.afp

        formatter = PlotDateFormatter(df.index)
        #fig = plt.figure()
        #ax = plt.addsubplot()
        fig, ax = plt.subplots()
        ax.xaxis.set_major_formatter(formatter)
    
        ax.plot(np.arange(len(df)), df['p'])

        for cord in linecords:
            plt.plot(cord[0],cord[1],color='red')

        fig.autofmt_xdate()
        plt.xlim(-10,len(df.index) + 10)
        plt.ylim(df.p.min() - 10,df.p.max() + 10)
        plt.grid(ax)
        #if show:
        #    plt.show()
        
        #"{0}{1}.png".format("./data/",datetime.datetime.strftime(datetime.datetime.now(),'%Y%M%m%S'))
        if self.plot_file:
            save_path = self.plot_file.format(self.symbol)
            if os.path.exists(os.path.dirname(save_path)):
                plt.savefig(save_path)
                
        plt.clf()
        plt.close()
Example #27
def plotRocCurves(lesion, lesion_en):
	file_legend = []
	for techniqueMid in techniquesMid:
		for techniqueLow in techniquesLow:
			file_legend.append((directory + techniqueLow + "/" + techniqueMid + "/operating-points-" + lesion + "-scale.dat", "Low-level: " + techniqueLow + ". Mid-level: " + techniqueMid + "."))
			
			pylab.clf()
			pylab.figure(1)
			pylab.xlabel('1 - Specificity', fontsize=12)
			pylab.ylabel('Sensitivity', fontsize=12)
			pylab.title(lesion_en)
			pylab.grid(True, which='both')
			pylab.xticks([i/10.0 for i in range(1,11)])
			pylab.yticks([i/10.0 for i in range(0,11)])
			#pylab.tick_params(axis="both", labelsize=15)
			
			for file, legend in file_legend:
				points = open(file,"rb").readlines()
				x = [float(p.split()[0]) for p in points]
				y = [float(p.split()[1]) for p in points]
				x.append(0.0)
				y.append(0.0)
				
				auc = numpy.trapz(y, x) * -100

				pylab.grid()
				pylab.plot(x, y, '-', linewidth = 1.5, label = legend + u" (AUC = {0:0.1f}%)".format(auc))

	pylab.legend(loc = 4, borderaxespad=0.4, prop={'size':12})
	pylab.savefig(directory + "plots/" + lesion + ".pdf", format='pdf')
Example #28
    def plotFittingResults(self):
        """
        Plot results of Rmax optimization procedure and best fit of the experimental data
        """
        _listFitQ = [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitQ()]
        _listFitValues = [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitValues()]
        _listExpQ = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataQ()]
        _listExpValues = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataValues()]

        #_listExpStdDev = None
        #if self.getDataInput().getExperimentalDataStdDev():
        #    _listExpStdDev = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataStdDev()]
        #if _listExpStdDev:
        #    pylab.errorbar(_listExpQ, _listExpValues, yerr=_listExpStdDev, linestyle='None', marker='o', markersize=1,  label="Experimental Data")
        #    pylab.gca().set_yscale("log", nonposy='clip')
        #else:         
        #    pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5,  label="Experimental Data")

        pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5, label="Experimental Data")
        pylab.semilogy(_listFitQ, _listFitValues, label="Fitting curve")
        pylab.xlabel('q')
        pylab.ylabel('I(q)')
        pylab.suptitle("RMax : %3.2f. Fit quality : %1.3f" % (self.getDataInput().getRMax().getValue(), self.getDataOutput().getFitQuality().getValue()))
        pylab.legend()
        pylab.savefig(os.path.join(self.getWorkingDirectory(), "gnomFittingResults.png"))
        pylab.clf()
Example #29

def check_HDF5(size=64):
    """
    Plot images with landmarks to check the processing
    """

    # Get hdf5 file
    hdf5_file = os.path.join(data_dir, "CelebA_%s_data.h5" % size)

    with h5py.File(hdf5_file, "r") as hf:
        data_color = hf["training_color_data"]
        data_lab = hf["training_lab_data"]
        data_black = hf["training_black_data"]
        for i in range(data_color.shape[0]):
            fig = plt.figure()
            gs = gridspec.GridSpec(3, 1)
            for k in range(3):
                ax = plt.subplot(gs[k])
                if k == 0:
                    img = data_color[i, :, :, :].transpose(1,2,0)
                    ax.imshow(img)
                elif k == 1:
                    img = data_lab[i, :, :, :].transpose(1,2,0)
                    img = color.lab2rgb(img)
                    ax.imshow(img)
                elif k == 2:
                    img = data_black[i, 0, :, :] / 255.
                    ax.imshow(img, cmap="gray")
            gs.tight_layout(fig)
            plt.show()
            plt.clf()
            plt.close()
Example #30
def visualize_representations(dir, nlayers):
    pylab.clf()

    n_subplot_vertical = nlayers + 1
    n_subplot_horizontal = 4

    # input
    location = 1 + nlayers * n_subplot_horizontal + 1
    filename = dir + "representations/input.txt"
    plot_representation(n_subplot_vertical, n_subplot_horizontal, location, filename, "x")

    #
    for i in range(nlayers):
        # reconstruction of input
        location = 1 + (nlayers - i) * n_subplot_horizontal
        filename = dir + "representations/recons_from_hidden_l" + str(i) + ".txt"
        plot_representation(n_subplot_vertical, n_subplot_horizontal, location, filename, "rebuilt from h")

        # hidden layer
        location = 1 + (nlayers - i - 1) * n_subplot_horizontal + 1
        filename = dir + "representations/hidden_l" + str(i) + ".txt"
        plot_representation(n_subplot_vertical, n_subplot_horizontal, location, filename, "hidden")

        # reconstruction of layer through speech
        location = 1 + (nlayers - i - 1) * n_subplot_horizontal + 2
        filename = dir + "representations/recons_from_speech_l" + str(i) + ".txt"
        plot_representation(n_subplot_vertical, n_subplot_horizontal, location, filename, "rebuilt from s")

        # speech
        location = 1 + (nlayers - i - 1) * n_subplot_horizontal + 3
        filename = dir + "representations/speech_l" + str(i) + ".txt"
        plot_representation(n_subplot_vertical, n_subplot_horizontal, location, filename, "speech")

    pylab.savefig(dir + "representations.png")
Example #31
def plot_swe(run_id, met_inp, which_model, hydro_years_to_take, catchment,
             output_dem, origin, model_swe_sc_threshold, mask_file,
             dsc_snow_output_folder, plot_folder):

    bounding = 'moving'
    lats = np.linspace(48e5, 50e5, 365)  # assume plot is 5e5 high, 4e5 wide
    # alternative: lats = np.linspace(47e5, 52e5, 365)
    lons = np.linspace(11e5, 12e5, 365)
    # alternative: lons = np.linspace(10e5, 15e5, 365)
    camera_motion = [lats, lons]

    plt.rcParams.update({'font.size': 14})
    plt.rcParams.update({'axes.titlesize': 14})

    # bin_edges = [-0.001, 30, 60, 90, 120, 180, 270, 360]  # use small negative number to include 0 in the interpolation

    for i, year_to_take in enumerate(hydro_years_to_take):
        modis_mask = np.load(mask_file)
        fig1 = plt.figure(figsize=[8, 6])
        print('loading data for year {}'.format(year_to_take))
        nc_file = nc.Dataset(
            dsc_snow_output_folder + '/snow_out_{}_{}_{}_{}_{}_{}.nc'.format(
                met_inp, which_model, catchment, output_dem, run_id,
                year_to_take), 'r')
        nztm_dem = nc_file.variables['elevation'][:]
        x_centres = nc_file.variables['easting'][:]
        y_centres = nc_file.variables['northing'][:-116]
        out_dt = nc.num2date(
            nc_file.variables['time'][:], nc_file.variables['time'].units
        )  # optionally: only_use_cftime_datetimes=False, only_use_python_datetimes=True
        for i in range(nc_file.variables['swe'].shape[0]):

            swe = np.log(nc_file.variables['swe'][i, :-116, :])
            swe[np.isinf(swe)] = 0  # take for NZ
            swe[modis_mask == False] = np.nan

            CS1 = plt.pcolormesh(x_centres,
                                 y_centres,
                                 swe,
                                 cmap=copy.copy(plt.cm.get_cmap('viridis')),
                                 vmin=1.61,
                                 vmax=8.52)  #levels=bin_edges, extend='max'
            #CS1.cmap.set_bad('grey')
            #CS1.cmap.set_under('grey')
            #CS1.cmap.set_under('grey')
            plt.gca().set_aspect('equal')
            # plt.imshow(modis_scd, origin=0, interpolation='none', vmin=0, vmax=365, cmap='magma_r')
            plt.xticks([])
            plt.yticks([])
            cbar = plt.colorbar(CS1, ticks=np.log([5, 50, 500, 5000]))
            cbar.set_ticklabels([5, 50, 500, 5000])
            cbar.set_label('Snow water equivalent (mm w.e.)', rotation=90)
            plt.xticks(np.arange(12e5, 21e5, 2e5))
            plt.yticks(np.arange(48e5, 63e5, 2e5))
            if bounding == 'moving':
                plt.ylim((camera_motion[0][i], camera_motion[0][i] + 4e5))
                plt.xlim((camera_motion[1][i], camera_motion[1][i] + 5e5))
            plt.ticklabel_format(axis='both', style='sci', scilimits=(0, 0))
            plt.ylabel('NZTM northing')
            plt.xlabel('NZTM easting')
            plt.title('SWE {}'.format(out_dt[i]))
            plt.tight_layout()

            plt.savefig(
                plot_folder + '/SWE model {} thres{} {} {} {} {}.png'.format(
                    year_to_take, model_swe_sc_threshold, run_id, met_inp,
                    which_model, i),
                dpi=150)
            # plt.show()
            plt.clf()
Example #32
    def handle(self, *args, **kwargs):
        try:
            self.options = map2obj(kwargs)

            if self.options.enable_preclustering and self.options.enable_postclustering:
                logging.info('both pre and post clustering enabled. invalid options.')
                return

            X = self.load(self.options.target, self.options.dataid)
            print 'X shape: ', X.shape

            labels = []
            n_clusters_ = 0
            if self.options.enable_preclustering:
                bandwidth = estimate_bandwidth(X, quantile=self.options.quantile, n_samples=500)
                ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
                ms.fit(X)
                labels = ms.labels_
                # cluster_centers = ms.cluster_centers_
                labels_unique = numpy.unique(labels)
                n_clusters_ = len(labels_unique)
                print 'n_clusters_: ', n_clusters_

            self.fig = plt.figure(1, figsize=(8, 6))
            plt.clf()
            #ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
            self.ax = Axes3D(self.fig)

            plt.cla()
            pca = decomposition.PCA(n_components=3)
            pca.fit(X)
            X = pca.transform(X)

            if self.options.enable_postclustering:
                bandwidth = estimate_bandwidth(X, quantile=self.options.quantile, n_samples=500)
                ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
                ms.fit(X)
                labels = ms.labels_
                # cluster_centers = ms.cluster_centers_
                labels_unique = numpy.unique(labels)
                n_clusters_ = len(labels_unique)
                print 'n_clusters_: ', n_clusters_

            if self.options.enable_preclustering or self.options.enable_postclustering:
                # Reorder the labels to have colors matching the cluster results
                colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
                for k, col in zip(range(n_clusters_), colors):
                    my_members = labels == k
                    # cluster_center = cluster_centers[k]
                    self.ax.scatter(
                        X[my_members, 0],
                        X[my_members, 1],
                        X[my_members, 2],
                        c=col,
                        cmap=plt.cm.spectral
                    )
                    print '[Cluster{}] {}'.format(k, sum(my_members))
            else:
                self.ax.scatter(X[:, 0], X[:, 1], X[:, 2], c='b', cmap=plt.cm.spectral)

            if self.options.enable_animation:
                anim = animation.FuncAnimation(
                    self.fig,
                    self.animate,
                    frames=int(360 / self.options.azimuth_interval)
                )
                anim.save(self.options.gif, writer='imagemagick', fps=self.options.fps)
                if self.options.enable_shrink:
                    self.shrink_gif()
            plt.show()
        except:
            logging.error(traceback.format_exc())
Example #33
M /= M.max()

#%% plot the distributions

pl.figure(1)
for i in range(nbd):
    pl.plot(x, A[:, i])
pl.title('Distributions')

#%% barycenter computation

# l2bary
bary_l2 = A.mean(1)

# wasserstein
reg = 1e-3
bary_wass = ot.bregman.barycenter(A, M, reg)

pl.figure(2)
pl.clf()
pl.subplot(2, 1, 1)
for i in range(nbd):
    pl.plot(x, A[:, i])
pl.title('Distributions')

pl.subplot(2, 1, 2)
pl.plot(x, bary_l2, 'r', label='l2')
pl.plot(x, bary_wass, 'g', label='Wasserstein')
pl.legend()
pl.title('Barycenters')
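
The snippet above starts mid-script: A (one distribution per column), the support x, nbd, and the cost matrix M are assumed to exist already. A minimal construction consistent with those names, using the POT library:

import numpy as np
import ot

n = 100                                   # support size
x = np.arange(n, dtype=np.float64)
a1 = ot.datasets.make_1D_gauss(n, m=20, s=5)
a2 = ot.datasets.make_1D_gauss(n, m=60, s=8)
A = np.vstack((a1, a2)).T                 # columns are distributions
nbd = A.shape[1]
M = ot.utils.dist0(n)                     # squared-distance cost matrix
M /= M.max()
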
Example #34
File: fitting.py Project: bmewjw/pyHegel
def fitplot(func,
            x,
            y,
            p0,
            yerr=None,
            extra={},
            sel=None,
            fig=None,
            skip=False,
            hold=False,
            col_fit='red',
            col_data=None,
            col_unsel_data=None,
            label=None,
            result_loc='upper right',
            xpts=1000,
            xlabel=None,
            ylabel=None,
            title_fmt={},
            **kwarg):
    """
    This does the same as fitcurve (see its documentation)
    but also plots the data, the fit on the top panel and
    the difference between the fit and the data on the bottom panel.
    sel selects the point to use for the fit. The unselected points are plotted
        in a color closer to the background.
    xpts selects the number of points to use for the xscale (from min to max)
         or it can be a tuple (min, max, npts)
         or it can also be a vector of points to use directly
         or it can be 'reuse' to reuse the x data.

    fig selects which figure to use. By default it uses the currently active one.
    skip when True, prevents the fitting. This is useful when trying out initial
         parameters for the fit. In this case, the returned values are
         (chi2, chiNorm)
    hold: when True, the plots are added on top of the previous ones.
    xlabel, ylabel: when given, changes the x and y labels.
    title_fmt is the options used to display the functions.
           For example to have a larger title you can use:
           title_fmt = dict(size=20)
    col_fit, col_data, col_unsel_data: are respectivelly the colors for the
           fit, the (selected) data and the unselected data.
           By default, the fit is red, the others cycle.
    label is a string used to label the curves. 'data' or 'fit' is appended
    result_loc when not None, is the position where the parameters will be printed.
            the box is draggable.
               see plotResult
    On return the active axis is the main one.
    """
    if fig:
        fig = plt.figure(fig)
    else:
        fig = plt.gcf()
    if title_fmt.get('verticalalignment', None) is None:
        # This prevents the title from leaking into the axes.
        title_fmt['verticalalignment'] = 'bottom'
    if not hold:
        plt.clf()
    if label is not None:
        data_label = label + ' data'
        fit_label = label + ' fit'
    else:
        data_label = 'data'
        fit_label = 'fit'
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, num=fig.number)
    ax1.set_position([.125, .30, .85, .60])
    ax2.set_position([.125, .05, .85, .20])
    if not hold and col_fit == 'red':
        # skip red from color cycle.
        # here it gets hard coded
        ax1.set_color_cycle(['b', 'g', 'c', 'm', 'y', 'k'])
    if col_data is None:
        # This is a bit of a hack, a matplotlib update could break this.
        # Another way would be to create a plot, use get_color() on it and remove the plot.
        col_data = ax1._get_lines.color_cycle.next()
    if col_fit is None:
        col_fit = ax1._get_lines.color_cycle.next()
    if col_unsel_data is None:
        col = matplotlib.colors.colorConverter.to_rgb(col_data)
        bgcol = matplotlib.colors.colorConverter.to_rgb(ax1.get_axis_bgcolor())
        # move halfway between col and bgcol
        col = np.array(col)
        bgcol = np.array(bgcol)
        col_unsel_data = tuple((col + bgcol) / 2.)
    if xlabel is not None:
        ax1.set_xlabel(xlabel)
    if ylabel is not None:
        ax1.set_ylabel(ylabel)
    plt.sca(ax1)  # the current axis on return
    if sel is not None:
        xsel = x[sel]
        ysel = y[sel]
        if yerr is not None and np.asarray(yerr).size > 1:
            yerrsel = yerr[sel]
        else:
            yerrsel = yerr
        ax1.errorbar(x,
                     y,
                     yerr=yerr,
                     fmt='.',
                     label='_nolegend_',
                     color=col_unsel_data)
    else:
        xsel = x
        ysel = y
        yerrsel = yerr
    if xpts == 'reuse':
        xx = x
    elif isinstance(xpts, (list, np.ndarray)):
        xx = xpts
    elif isinstance(xpts, tuple):
        xx = np.linspace(xpts[0], xpts[1], xpts[2])
    else:
        xx = np.linspace(x.min(), x.max(), xpts)
    err_func = lambda x, y, p: func(x, *p, **extra) - y
    if skip:
        pld1 = ax1.errorbar(xsel,
                            ysel,
                            yerr=yerrsel,
                            fmt='.',
                            label=data_label,
                            color=col_data)
        pld2 = ax2.errorbar(xsel,
                            err_func(xsel, ysel, p0),
                            yerr=yerrsel,
                            fmt='.',
                            label=data_label,
                            color=col_data)
    pl = ax1.plot(xx,
                  func(xx, *p0, **extra),
                  '-',
                  color=col_fit,
                  label=fit_label)[0]
    try:
        plt.title(func.display_str, **title_fmt)
    except AttributeError:  # No display_str
        pass
    fig.canvas.draw()
    #plt.draw()
    if not skip:
        p, resids, pe, extras = fitcurve(func,
                                         xsel,
                                         ysel,
                                         p0,
                                         yerr=yerrsel,
                                         extra=extra,
                                         **kwarg)
        printResult(func, p, pe, extra=extra)
        #pld1.remove()
        ax1.errorbar(xsel,
                     ysel,
                     yerr=extras['s'],
                     fmt='.',
                     label=data_label,
                     color=col_data)
        pl.set_ydata(func(xx, *p, **extra))
        ax1.relim()  # this is needed for the autoscaling to use the new data
        #pld2.remove()
        ax2.errorbar(xsel,
                     err_func(xsel, ysel, p),
                     yerr=extras['s'],
                     fmt='.',
                     label=data_label,
                     color=col_data)
        ax2.autoscale_view(scalex=False)  # only need to rescale y
        ax1.autoscale_view(scalex=False)  # only need to rescale y
        for t in ax1.texts:
            if isinstance(t, matplotlib.text.Annotation):
                t.remove()
                break
        if result_loc is not None:
            plotResult(func, p, pe, extra=extra, loc=result_loc, ax=ax1)
        fig.canvas.draw()
        #plt.draw()
        return p, resids, pe, extras
    else:
        if yerrsel is None:
            yerrsel = 1
        f = lambda p, x, y, yerr: ((func(x, *p, **extra) - y) / yerr)**2.
        chi2 = np.sum(f(p0, xsel, ysel, yerrsel))
        Ndof = len(xsel) - len(p0)
        chiNorm = chi2 / Ndof
        return chi2, chiNorm
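
A hypothetical use of fitplot with a simple linear model (fitcurve, printResult and plotResult come from the same module and are not shown here):

import numpy as np

def line(x, a, b):
    return a * x + b

x = np.linspace(0., 10., 50)
y = 3.0 * x + 1.0 + np.random.randn(50) * 0.5
p, resids, pe, extras = fitplot(line, x, y, p0=[1., 0.],
                                xlabel='x', ylabel='y', label='demo')
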
Example #35

        # python learn_ktsne.py 1 250 2 0.5 0.1 1000 1
        from usps import get_usps_split
        i=0

        (X_train, Y_train), (X_test, Y_test) = get_usps_split(1) 

        np.random.seed(seed)

        X_train = X_train[:num_cases]
        Y_train = Y_train[:num_cases] 

        [N, D] = X_train.shape
        print 'USPS, num_examples=%d, num_dim=%d' % (N, D) 

        Z = train_sgd(X_train,Y_train,dataset_name,P,k, num_iters=num_iters, perplexity=perplexity, eta=eta,mo=.9,L2=L2, batch_size=N,seed=seed)

        plt.clf()
        plt.hold(True)
        syms = ['bo', 'ro', 'go', 'bx', 'rx', 'gx', 'b<', 'r<', 'g<', 'k.']
        assert len(syms)==10
        for i,sym in zip(range(10),syms):
            I = (Y_train==i)
            plt.plot(Z[I,0], Z[I,1], sym)

        plt.title("USPS -- K=%s" % k)
        fig_filename = 'usps_K=%d' % (k)
        plt.savefig(fig_filename + '.png' )
        plt.savefig(fig_filename + '.pdf' )
        plt.savefig(fig_filename + '.eps' )
        plt.show()
Example #36
print 'Found %d files'%(len(fl))
N1=16;N2=128;N3=N2

#b=np.zeros((N1,N2,N3))
for f in fl:
    print 'Working on '+f
    a=np.fromfile(f,dtype='float32')
    if (len(a)==(1024*1024*1024)):
      a=np.reshape(a,(1024,1024,1024))  # freq in first axis
      #a=a.T ## Check this
      aa=a.reshape(N1,1024/N1,N2,1024/N2,N3,1024/N3)
      b=aa.mean(5).mean(3).mean(1)
      n=0
      pl.figure(1)
      pl.clf()
      pl.imshow(a[n])
      pl.title('Original plane %d'%(n))
      pl.figure(2)
      pl.clf()
      n=int(n*N1)/1024
      pl.imshow(b[n])
      pl.title('Sum plane %d: %s'%(n,f))
      pl.pause(0.1)
      fo=f+'.new'
      print 'Writing '+fo
      fo=open(fo,'wb')
      fo.write(b)
      fo.close()

fl=glob.glob('delta_T*Mpc.new')
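
The reshape-then-mean pattern above is standard block averaging; a small self-contained demonstration, assuming the cube side is divisible by each target dimension:

import numpy as np

a = np.arange(8 * 8 * 8, dtype=float).reshape(8, 8, 8)
N1, N2, N3 = 2, 4, 4
# split each axis into (blocks, samples-per-block), then average the samples
b = a.reshape(N1, 8 // N1, N2, 8 // N2, N3, 8 // N3).mean(5).mean(3).mean(1)
assert b.shape == (N1, N2, N3)
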
Example #37
                                         (1 + eps * X[n - 1])**2) *
                    (P[n - 1] / P0)**2)


def dpdw(n):
    return (-alpha / 2) * ((P0**2) / P[n - 1]) * (1 + eps * X[n - 1])


P[0] = 8.2
X[0] = 0

for n in range(1, M + 1):
    X[n] = X[n - 1] + dw * dxdw(n)
    P[n] = P[n - 1] + dw * dpdw(n)

plot.clf()
plot.figure(1)
plot.plot(w * 1e6, X)
plot.title("X(W)")
plot.xlabel("W * 10^6 [kg]")
plot.ylabel("Conversion")
print("Final conversion:{}".format(X[-1]))
plot.figure(2)
plot.plot(w * 1e6, P)
plot.title("P(W)")
plot.xlabel("W * 10^6 [kg]")
plot.ylabel("Pressure [atm]")
print("Final pressure:{}".format(P[-1]))

plot.figure(3)
plot.plot(w * 1e6, Fa0 * (1 - X) * 1e5)
Example #38
                     np.sqrt(np.sum(Xrd**2,axis=0)*np.sum(Yrd**2,axis=0))
        ret['spearman'] = spearman
    return ret
    

# test/example routine
if __name__ == "__main__":
    from matplotlib.pylab import figure, clf, plot, legend, show, ylim
    #from pretty import pprint
    from pprint import pprint
    N=500
    x=np.linspace(0,1,N)
    y=3+5*x**2+np.random.randn(N)*.1
    y[200]=100
    figure(1)
    clf()
    plot(x,y,label='data')
    ss=y*0+1.
    ER=True
    #ER=False
    (pf,res,pe,extras) = gen_polyfit(x,y,3,errors=ER)
    plot(x,gen_polyeval(x,pf),'g',label='fit no s')
    pprint (('fit no s',pf[0],pe,res,extras))
    pprint (report(x,y,pf))
    (pf,res,pe,extras) = gen_polyfit(x,y,3,s=10,errors=ER)
    plot(x,gen_polyeval(x,pf),'r',label='fit constant s')
    pprint (('fit constant s',pf[0],pe,res,extras))
    pprint ( report(x,y,pf,s=10) )
    ss[200]=100
    (pf,res,pe,extras) = gen_polyfit(x,y,3,s=ss,errors=ER)
    plot(x,gen_polyeval(x,pf),'k', label='fit with s')
Example #39
    def plot_loci(self,loci,filename,flank_limit=2):
        '''
            Plots the loci, windows and candidate genes

            Parameters
            ----------
            loci : iterable of co.Loci
                The loci to print
            filename : str
                The output filename
        '''
        plt.clf()
        # Each chromosome gets a plot
        chroms = set([x.chrom for x in loci])
        # Create a figure with a subplot for each chromosome 
        f, axes = plt.subplots(len(chroms),figsize=(10,4*len(chroms)))
        # Split loci by chromosome
        chromloci = defaultdict(list)
        for locus in sorted(loci):
            chromloci[locus.chrom].append(locus)
        # iterate over Loci
        seen_chroms = set([loci[0].chrom])
        voffset = 1 # Vertical Offset
        hoffset = 0 # Horizonatal Offset
        current_chrom = 0
        for i,locus in enumerate(loci):
            # Reset the temp variables in necessary
            if locus.chrom not in seen_chroms:
                seen_chroms.add(locus.chrom)
                current_chrom += 1
                voffset = 1
                hoffset = 0
            # Do the access things
            cax = axes[current_chrom]
            cax.set_ylabel('Chrom: '+ locus.chrom)
            cax.set_xlabel('Loci')
            cax.get_yaxis().set_ticks([])
            #cax.get_xaxis().set_ticks([])
            # shortcut for current axis
            cax.hold(True)
            # place marker for start window
            cax.scatter(hoffset,voffset,marker='>')
            # place marker for start snp
            cax.scatter(hoffset+locus.window,voffset,marker='.',color='blue')
            # place marker for stop snp
            cax.scatter(hoffset+locus.window+len(locus),voffset,marker='.',color='blue')
            # place marker for stop snp
            cax.scatter(hoffset+locus.window+len(locus)+locus.window,voffset,marker='<')

            # place markers for sub snps
            #for subsnp in locus.sub_loci:
            #    cax.scatter(
            #        hoffset + subsnp.start - locus.start + locus.window,
            #        voffset,
            #        marker='.',
            #        color='blue'
            #    )

            # place a block for interlocal distance
            cax.barh(
                bottom=voffset,
                width=50,
                height=1,
                left=hoffset+locus.window+len(locus)+locus.window,
                color='red'
            )
            # grab the candidate genes
            for gene in self.candidate_genes(locus,flank_limit=flank_limit):
                cax.barh(
                    bottom=voffset,
                    width = len(gene),
                    height= 1,
                    left=gene.start-locus.start+locus.window,
                    color='red'
                )
            voffset += 5

        plt.savefig(filename)
        return f
Example #40
def create_schedule(n_days,
                    inventory_start,
                    n_totes_washed_start,
                    pars=None,
                    do_plot=True,
                    verbose=True):
    """
    Demo an optimal supply chain scheduling with variable
    labor costs, and the concept of totes that hold a number of
    products. Totes need to be cleaned on a regular basis.
    :param pars: parameters from create_default_params
    :param do_plot: True if you want a plot created (default)
    :return: None
    """
    if pars is None:
        pars = create_default_params()

    days = np.arange(n_days)

    print 'creating demand'
    demand = create_demand(days)
    labor_costs = get_labor_costs(days, pars)

    # define variables which keep track of
    # production, inventory and number of totes washed per day

    print 'defining variables'
    production = Variable(n_days)
    sales = Variable(n_days)
    inventory = Variable(n_days)
    n_totes_washed = Variable(n_days)

    print 'calculating costs and profit'
    # calculate when the totes that were washed become dirty again
    shift_matrix = mu.time_shift_matrix(n_days, pars['days_until_cleaning'])

    n_totes_become_dirty = (shift_matrix * n_totes_washed)[:n_days]

    # calculate the number of clean totes on any day
    cum_matrix = mu.cumulative_matrix(n_days)

    n_washed_totes_available = n_totes_washed_start \
        + cum_matrix*(n_totes_washed - n_totes_become_dirty)

    print 'calculating total cost'

    # Minimize total cost which is
    # sum of labor costs, storage costs and washing costs

    total_cost = production.T*labor_costs + \
                 pars['storage_cost'] * sum(inventory) + \
                 pars['washing_tote_cost'] * sum(n_totes_washed)

    total_profit = pars['sales_price'] * sum(sales) - total_cost

    print 'defining objective'
    objective = Maximize(total_profit)

    # Subject to these constraints

    constraints = make_constraints(production, sales, inventory, pars,
                                   n_washed_totes_available, n_totes_washed,
                                   demand, inventory_start)

    # define the problem and solve it

    problem = Problem(objective, constraints)

    solver = 'cvxpy'
    print 'solving with: %s' % solver
    start = time()
    problem.solve(verbose=verbose)

    finish = time()
    run_time = finish - start
    print 'Solve time: %s seconds' % run_time

    print "Status: %s" % problem.status
    if problem.status == 'infeasible':
        print "Problem is infeasible, no solution found"
        return

    n_items_sold = sum(sales.value)
    total_cost = problem.value
    total_washing_cost = pars['washing_tote_cost'] * sum(n_totes_washed.value)
    total_labor_cost = (production.T * labor_costs).value
    total_storage_cost = sum(inventory.value) * pars['storage_cost']
    total_cost_per_item = problem.value / n_items_sold

    print "Total cost: %s" % total_cost
    print "Total labor cost: %s" % total_labor_cost
    print "Total washing cost: %s" % total_washing_cost
    print "Total storage cost: %s" % total_storage_cost
    print "Total cost/item: %s" % total_cost_per_item
    print "Total profit: %s" % total_profit.value

    if do_plot:
        plot_variables(days, production, inventory, sales, demand,
                       n_washed_totes_available)
        plt.clf()
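
A hypothetical invocation (relies on create_default_params, create_demand, the mu matrix helpers and cvxpy, none of which are shown here):

# 90-day schedule, starting with 20 items in inventory and 2 clean totes
create_schedule(n_days=90, inventory_start=20, n_totes_washed_start=2)
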
Example #41
    def create_stochastic_blockmodel_graph(self, blocks=10, size=100, self_block_connectivity=0.9, other_block_connectivity=0.1, connectivity_matrix=None, directed=False,
                                           self_edges=False, power_exp=None, scale=None, plot_stat=False):
        size = size if isinstance(size, list) else [size]
        self_block_connectivity = self_block_connectivity if isinstance(self_block_connectivity, list) else [self_block_connectivity]
        other_block_connectivity = other_block_connectivity if isinstance(other_block_connectivity, list) else [other_block_connectivity]

        num_nodes = sum([size[i % len(size)] for i in xrange(blocks)])
        if power_exp is None:
            self.print_f("Starting to create Stochastic Blockmodel Graph with {} nodes and {} blocks".format(num_nodes, blocks))
        else:
            self.print_f("Starting to create degree-corrected (alpha=" + str(power_exp) + ") Stochastic Blockmodel Graph with {} nodes and {} blocks".format(num_nodes, blocks))
        self.print_f('convert/transform probabilities')
        blocks_range = range(blocks)
        block_sizes = np.array([size[i % len(size)] for i in blocks_range])

        # create connectivity matrix of self- and other-block-connectivity
        if connectivity_matrix is None:
            connectivity_matrix = []
            self.print_f('inner conn: ' + str(self_block_connectivity) + '\tother conn: ' + str(other_block_connectivity))
            for idx in blocks_range:
                row = []
                for jdx in blocks_range:
                    if idx == jdx:
                        row.append(self_block_connectivity[idx % len(self_block_connectivity)])
                    else:
                        if scale is not None:
                            prob = other_block_connectivity[idx % len(other_block_connectivity)] / (num_nodes - block_sizes[idx]) * block_sizes[jdx]
                            if directed:
                                row.append(prob)
                            else:
                                row.append(prob / 2)
                        else:
                            row.append(other_block_connectivity[idx % len(other_block_connectivity)])
                connectivity_matrix.append(row)

        # convert con-matrix to np.array
        if connectivity_matrix is not None and isinstance(connectivity_matrix, np.matrix):
            connectivity_matrix = np.asarray(connectivity_matrix)

        # convert con-matrix to np.array
        if connectivity_matrix is not None and not isinstance(connectivity_matrix, np.ndarray):
            connectivity_matrix = np.array(connectivity_matrix)

        self.print_f('conn mat')
        printing.print_matrix(connectivity_matrix)

        if scale == 'relative' or scale == 'absolute':
            new_connectivity_matrix = []
            for i in blocks_range:
                connectivity_row = connectivity_matrix[i, :] if connectivity_matrix is not None else None
                nodes_in_src_block = block_sizes[i]
                multp = 1 if scale == 'absolute' else (nodes_in_src_block * (nodes_in_src_block - 1))
                row_prob = [(connectivity_row[idx] * multp) / (nodes_in_src_block * (nodes_in_block - 1)) for idx, nodes_in_block in enumerate(block_sizes)]
                new_connectivity_matrix.append(np.array(row_prob))
            connectivity_matrix = np.array(new_connectivity_matrix)
            self.print_f(scale + ' scaled conn mat:')
            printing.print_matrix(connectivity_matrix)

        # create nodes and store corresponding block-id
        self.print_f('insert nodes')
        vertex_to_block = []
        appender = vertex_to_block.append
        colors = self.graph.new_vertex_property("float")
        for i in xrange(blocks):
            block_size = size[i % len(size)]
            for j in xrange(block_size):
                appender((self.graph.add_vertex(), i))
                node = vertex_to_block[-1][0]
                colors[node] = i

        # create edges
        get_rand = np.random.random
        add_edge = self.graph.add_edge

        self.print_f('create edge probs')
        degree_probs = defaultdict(lambda: dict())
        for vertex, block_id in vertex_to_block:
            if power_exp is None:
                degree_probs[block_id][vertex] = 1
            else:
                degree_probs[block_id][vertex] = math.exp(power_exp * np.random.random())

        tmp = dict()
        self.print_f('normalize edge probs')
        all_prop = []
        for block_id, node_to_prop in degree_probs.iteritems():
            sum_of_block_norm = 1 / sum(node_to_prop.values())
            tmp[block_id] = {key: val * sum_of_block_norm for key, val in node_to_prop.iteritems()}
            all_prop.append(tmp[block_id].values())
        degree_probs = tmp
        if plot_stat:
            plt.clf()
            plt.hist(all_prop, bins=15)
            plt.savefig("prop_dist.png")
            plt.close('all')

        self.print_f('count edges between blocks')
        edges_between_blocks = defaultdict(lambda: defaultdict(int))
        for idx, (src_node, src_block) in enumerate(vertex_to_block):
            conn_mat_row = connectivity_matrix[src_block, :]
            for dest_node, dest_block in vertex_to_block:
                if get_rand() < conn_mat_row[dest_block]:
                    edges_between_blocks[src_block][dest_block] += 1

        self.print_f('create edges')
        for src_block, dest_dict in edges_between_blocks.iteritems():
            self.print_f(' -- Processing Block {}. Creating links to: {}'.format(src_block, dest_dict))
            for dest_block, num_edges in dest_dict.iteritems():
                self.print_f('   ++ adding {} edges to {}'.format(num_edges, dest_block))
                for i in xrange(num_edges):
                    # find src node
                    prob = np.random.random()
                    prob_sum = 0
                    src_node = None
                    for vertex, v_prob in degree_probs[src_block].iteritems():
                        prob_sum += v_prob
                        if prob_sum >= prob:
                            src_node = vertex
                            break
                    # find dest node
                    prob = np.random.random()
                    prob_sum = 0
                    dest_node = None
                    for vertex, v_prob in degree_probs[dest_block].iteritems():
                        prob_sum += v_prob
                        if prob_sum >= prob:
                            dest_node = vertex
                            break
                    if src_node is None or dest_node is None:
                        print 'Error selecting node:', src_node, dest_node
                    if self.graph.edge(src_node, dest_node) is None:
                        if self_edges or not src_node == dest_node:
                            add_edge(src_node, dest_node)
        self.graph.vertex_properties["colorsComm"] = colors
        return self.return_and_reset()
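
The edge-creation loops above pick each source and destination node by scanning the per-block probability dict until the running sum passes a uniform draw. An equivalent inverse-CDF draw with np.searchsorted (a sketch, assuming the block's vertices and normalized probabilities are first collected into parallel arrays) replaces each linear scan with a binary search:

import numpy as np

def sample_block_nodes(vertices, probs, n_draws):
    # draw n_draws vertices with the given normalized probabilities via inverse CDF
    cdf = np.cumsum(probs)
    idx = np.searchsorted(cdf, np.random.random_sample(n_draws))
    return [vertices[i] for i in idx]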
Example #42
a.vw /= utau2

# Meteorological wind direction (the direction the wind blows from, in degrees
# clockwise from north), assuming a.u is the eastward and a.v the northward component.
wind_dir = 180.0 + np.arctan2(a.u, a.v) * 180.0 / np.pi

pylab.plot(np.sqrt(np.multiply(a.u, a.u) + np.multiply(a.v, a.v)),
           a.z,
           label='|U|')
pylab.plot(a.u, a.z, label=r'$U_x$')
pylab.plot(a.v, a.z, label=r'$U_y$')
pylab.xlabel("Velocity (m/s)")
pylab.ylabel("Height (m)")
pylab.legend()
pylab.grid()
pylab.savefig("velmean.pdf")

pylab.clf()
pylab.plot(a.w, a.z, label=r'$U_z$')
pylab.xlabel("Velocity (m/s)")
pylab.ylabel("Height (m)")
pylab.legend()
pylab.grid()
pylab.savefig("wmean.pdf")

pylab.clf()
pylab.plot(wind_dir, a.z, label='wind direction')
pylab.xlabel("Wind direction (deg)")
pylab.ylabel("Height (m)")
#pylab.legend()
pylab.grid()
pylab.savefig("wind_dir.pdf")
Example #43
def plot_Subplot(request):
    loadform = CSVUploadFileForm()
    TheoN = request.session.get('TheoN')
    RTN = request.session.get('RTN')
    PlotConf = request.session.get('PlotConf')
    Category = request.session.get('Category')
    if 'figure' in request.session:
        figval=request.session['figure']
        figurename = 'result'+str(figval)+'.png'
    else:
        figurename = 'test.jpg'
    
    start = request.POST['start']
    end = request.POST['end']
    fres = float(request.POST['freq'])
    fmin = float(request.POST['fmin'])
    fmax = float(request.POST['fmax'])
    
    if 'figure' in request.session:
        figval=request.session['figure']
        fig = plt.figure(figval+100,figsize=(8,5))
        plt.clf()
    else:
        fig = plt.figure(100,figsize=(10,6))
        figval = 0
    
    # CHANGE FOR LOCAL
    status,freq_tot = plot_DARM(start,end,fres, figval=figval+100)
    #status,freq_tot = plot_DARM_local(start,end,fres, figval)
    if status==1: # error in obtaining the data
        if 'figure' in request.session:
            figurename = 'result'+str(figval)+'.png'
        else:
            figurename = 'test.jpg'
        context={'errormsg':freq_tot,
                 'loadform':loadform,
                 'TheoN':TheoN,
                 'RTN':RTN,
                 'figurename':figurename,
                 'PlotConf':PlotConf,
                 'Category':Category
                 }
        return render(request, 'NoiseBudgetter/index.html',context)
    
    name = request.POST['categname']
    oneCategory = Category[name]
    
    ans = plot_oneCategonly(name,oneCategory,start,end,fres,freq_tot,figval=figval+100)
    status=ans[0]
    
    if status==0:
        ymin='None'
        ymax='None'
        msg = 'plotted'
        cattotal=ans[2]
        if (request.POST['ymin']!='') and (request.POST['ymax']!=''):
            ymin = float(request.POST['ymin'])
            ymax = float(request.POST['ymax'])
            plt.ylim([ymin,ymax])
        elif (request.POST['ymin']!='') or (request.POST['ymax']!=''):
            msg = 'Please specify y-axis range for both min and max.'
            
        plt.xlim([fmin,fmax])
        subfigurename = 'subplot'+str(figval)+'.png'
        fig.savefig('NoiseBudgetter/static/'+subfigurename)
        
        context={'errormsg':msg,
                 'loadform':loadform,
                 'TheoN':TheoN,
                 'RTN':RTN,
                 'figurename':figurename,
                 'PlotConf':PlotConf,
                 'Category':Category,
                 'Subplot':{'name':name,'figure':subfigurename}
                 }
        
    else:
        figurename = 'result'+str(figval)+'.png'
        context={'errormsg':ans[1],
                 'loadform':loadform,
                 'TheoN':TheoN,
                 'RTN':RTN,
                 'figurename':figurename,
                 'PlotConf':PlotConf,
                 'Category':Category
                 }
    return render(request, 'NoiseBudgetter/index.html',context)
Example #44
def draw_fig(m, path):
    ax = sns.heatmap(m, linewidth=0.5, vmin=-1, vmax=1)
    plt.savefig(path, dpi=1000)
    plt.clf()
Example #45
    def compute_color_priority(self):
        """
        Compute the color priority
        :return: 
        """
        print(" -- Compute color priority ...")
        # Load the gamut points location
        quantized_ab = np.load(os.path.join(self._data_dir, "pts_in_hull.npy"))

        if self._make_plot:
            plt.figure(figsize=(15, 15))
            grid_spec = gridspec.GridSpec(1, 1)
            ax = plt.subplot(grid_spec[0])

            # scatter the gamut once; only the annotations depend on the loop index
            ax.scatter(quantized_ab[:, 0], quantized_ab[:, 1])
            for i in range(quantized_ab.shape[0]):
                ax.annotate(str(i), (quantized_ab[i, 0], quantized_ab[i, 1]), fontsize=6)
            ax.set_xlim([-110, 110])
            ax.set_ylim([-110, 110])

        # Compute the color priority over a subset of the training set
        with h5py.File(os.path.join(self._data_dir, "HDF5_data_{}.h5".format(self._img_size)), "a") as hf_handler:

            x_ab = hf_handler["training_lab_data"][:100000][:, 1:, :, :]

            x_a = np.ravel(x_ab[:, 0, :, :])
            x_b = np.ravel(x_ab[:, 1, :, :])
            x_ab = np.vstack((x_a, x_b)).T

            if self._make_plot:
                plt.hist2d(x_ab[:, 0], x_ab[:, 1], bins=100, norm=LogNorm())
                plt.xlim([-110, 110])
                plt.ylim([-110, 110])
                plt.colorbar()
                plt.show()
                plt.clf()
                plt.close()

            # Create nearest neighbor instance with index = quantized_ab
            nearest_neighbor = 1
            nearest = nn.NearestNeighbors(n_neighbors=nearest_neighbor, algorithm="ball_tree").fit(quantized_ab)

            # Find index of nearest neighbor for x_ab
            distance, index = nearest.kneighbors(x_ab)

            # We now count the number of occurrences of each color
            index = np.ravel(index)
            counts = np.bincount(index)
            indexes = np.nonzero(counts)[0]
            priority_probability = np.zeros((quantized_ab.shape[0]))

            # vectorized assignment; the original looped over every bin but
            # assigned the same full slice on each iteration
            priority_probability[indexes] = counts[indexes]

            # We turn this into a color probability
            priority_probability = priority_probability / (1.0 * np.sum(priority_probability))

            # Save the probability
            np.save(os.path.join(self._data_dir, "DataSet_prior_probability_{}.npy".format(self._img_size)), priority_probability)

            if self._make_plot:
                plt.hist(priority_probability, bins=100)
                plt.yscale("log")
                plt.show()
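
Saved priors like this one are commonly smoothed and inverted into class-rebalancing weights. A minimal sketch under that assumption (the uniform-mixing weight lam and the normalization are conventional choices, not taken from this code):

import numpy as np

def rebalancing_weights(prior, lam=0.5):
    # mix the prior with a uniform distribution, invert, and normalize
    # so the expected weight under the prior equals one
    q = prior.size
    smoothed = (1.0 - lam) * prior + lam / q
    weights = 1.0 / smoothed
    return weights / (prior * weights).sum()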
Example #46
def cob_health(args):
    log = coblog()
    log(
        f"\n"
        f"-----------------------------\n"
        f"   Network Health:{args.cob} \n"
        f"-----------------------------\n"
    )
    log(f"\nCreating reports in {os.getcwd()}\n\n")

    cob = co.COB(args.cob)
    if args.out is None:
        args.out = "{}_Health".format(cob.name)
    log(f"Output prefix: {args.out}")

    if args.edge_zscore_cutoff is not None:
        log("Changing Z-Score cutoff to {}", args.edge_zscore_cutoff)
        cob.set_sig_edge_zscore(args.edge_zscore_cutoff)

    log("Printing Summary ---------------------------------------------------")
    if not path.exists("{}.summary.txt".format(args.out)):
        with open("{}.summary.txt".format(args.out), "w") as OUT:
            # Print out the network summary
            cob.summary(file=OUT)
    else:
        log("Skipped summary.")

    log("Plotting Scores ----------------------------------------------------")
    if not path.exists("{}_CoexPCC_raw.png".format(args.out)):
        cob.plot_scores("{}_CoexPCC_raw.png".format(args.out), pcc=True)
    else:
        log("Skipped Raw.")

    if not path.exists("{}_CoexScore_zscore.png".format(args.out)):
        cob.plot_scores("{}_CoexScore_zscore.png".format(args.out), pcc=False)
    else:
        log("Skipped Norm.")

    log("Plotting Expression ------------------------------------------------")
    # if not path.exists('{}_Expr_raw.png'.format(args.out)):
    #    cob.plot(
    #        '{}_Expr_raw.png'.format(args.out),
    #        include_accession_labels=True,
    #        raw=True,
    #        cluster_method=None
    #    )
    # else:
    #    log('Skipped raw.')
    if not path.exists("{}_Expr_norm.png".format(args.out)):
        cob.plot_heatmap(
            "{}_Expr_norm.png".format(args.out),
            include_accession_labels=True,
            raw=False,
            cluster_method="ward",
            cluster_accessions=True,
        )
    else:
        log("Skipped norm.")
    # log('Plotting Cluster Expression-----------------------------------------')
    # if not path.exists('{}_Expr_cluster.png'.format(args.out)):
    #    cob.plot(
    #        '{}_Expr_cluster.png'.format(args.out),
    #        include_accession_labels=True,
    #        raw=False,
    #        cluster_accessions=True,
    #        avg_by_cluster=True
    #    )
    # else:
    #    log('Skipped norm.')

    log("Printing QC Statistics ---------------------------------------------")
    if args.refgen is not None:
        if not path.exists("{}_qc_gene.txt".format(args.out)):
            # Print out the breakdown of QC Values
            refgen = co.RefGen(args.refgen)
            gene_qc = cob._bcolz("qc_gene")
            gene_qc = gene_qc[gene_qc.pass_membership]
            gene_qc["chrom"] = ["chr" + str(refgen[x].chrom) for x in gene_qc.index]
            gene_qc = gene_qc.groupby("chrom").agg(sum, axis=0)
            # Add totals at the bottom
            totals = gene_qc.iloc[:, 1:].apply(sum)  # .ix was removed from pandas
            totals.name = "TOTAL"
            gene_qc = gene_qc.append(totals)
            gene_qc.to_csv("{}_qc_gene.txt".format(args.out), sep="\t")
        else:
            log("Skipped QC summary.")

    log("Plotting Degree Distribution ---------------------------------------")
    if not path.exists("{}_DegreeDist.png".format(args.out)):
        degree = cob.degree["Degree"].values
        # Using powerlaw makes run-time warning the first time you use it.
        # This is still an open issue on the creators github.
        # The creator recommends removing this warning as long as there is a fit.
        np.seterr(divide="ignore", invalid="ignore")
        fit = powerlaw.Fit(degree, discrete=True, xmin=1)
        # get an axis
        ax = plt.subplot()
        # Log-likelihood ratio comparisons between candidate distributions
        # (each returns an (R, p) tuple; computed here but not reported)
        t2p = fit.distribution_compare("truncated_power_law", "power_law")
        t2e = fit.distribution_compare("truncated_power_law", "exponential")
        p2e = fit.distribution_compare("power_law", "exponential")
        # Plot!
        emp = fit.plot_ccdf(ax=ax, color="r", linewidth=3, label="Empirical Data")
        pwr = fit.power_law.plot_ccdf(
            ax=ax, linewidth=2, color="b", linestyle=":", label="Power law"
        )
        tpw = fit.truncated_power_law.plot_ccdf(
            ax=ax, linewidth=2, color="k", linestyle="-.", label="Truncated Power"
        )
        exp = fit.exponential.plot_ccdf(
            ax=ax, linewidth=2, color="g", linestyle="--", label="Exponential"
        )
        ####
        ax.set_ylabel("p(Degree≥x)")
        ax.set_xlabel("Degree Frequency")
        ax.legend(loc="best")
        plt.title("{} Degree Distribution".format(cob.name))
        # Save Fig
        try:
            plt.savefig("{}_DegreeDist.png".format(args.out))
        except FutureWarning as e:
            # This is a matplotlib bug
            pass
    else:
        log("Skipping Degree Dist.")

    if args.go is not None:
        log("Plotting GO --------------------------------------------------------")
        # Set the alpha based on the tails
        if args.two_tailed == True:
            alpha = 0.05 / 2
        else:
            alpha = 0.05
        # Generate the GO Table
        if not path.exists("{}_GO.csv".format(args.out)):
            go = co.GOnt(args.go)
            term_ids = []
            density_emp = []
            density_pvals = []
            locality_emp = []
            locality_pvals = []
            term_sizes = []
            term_desc = []
            terms_tested = 0
            # max_terms limits the number of GO terms tested (sub-sampling)
            if args.max_terms is not None:
                log("Limiting to {} GO Terms", args.max_terms)
                terms = go.rand(
                    n=args.max_terms,
                    min_term_size=args.min_term_size,
                    max_term_size=args.max_term_size,
                )
            else:
                # Else do the whole set (default is terms between 10 and 300 genes)
                terms = go.iter_terms(
                    min_term_size=args.min_term_size, max_term_size=args.max_term_size
                )
            for term in terms:
                # Some terms will lose genes that are not in networks
                term.loci = list(filter(lambda x: x in cob, term.loci))
                # Skip terms that are not an adequate size
                if len(term) < args.min_term_size or len(term) > args.max_term_size:
                    continue
                # set density value for two tailed go so we only test it once
                density = cob.density(term.loci)
                # one tailed vs two tailed test
                density_emp.append(density)
                #
                term_ids.append(term.id)
                term_sizes.append(len(term))
                term_desc.append(str(term.desc))
                # ------ Density
                # Calculate PVals
                density_bs = np.array(
                    [
                        cob.density(cob.refgen.random_genes(n=len(term.loci)))
                        for x in range(args.num_bootstraps)
                    ]
                )
                if density > 0:
                    pval = sum(density_bs >= density) / args.num_bootstraps
                else:
                    pval = sum(density_bs <= density) / args.num_bootstraps
                density_pvals.append(pval)

                # ------- Locality
                locality = cob.locality(term.loci, include_regression=True).resid.mean()
                locality_emp.append(locality)
                # Calculate PVals
                locality_bs = np.array(
                    [
                        cob.locality(
                            cob.refgen.random_genes(n=len(term.loci)),
                            include_regression=True,
                        ).resid.mean()
                        for x in range(args.num_bootstraps)
                    ]
                )
                if locality > 0:
                    pval = sum(locality_bs >= locality) / args.num_bootstraps
                else:
                    pval = sum(locality_bs <= locality) / args.num_bootstraps
                locality_pvals.append(pval)
                # -------------
                terms_tested += 1
                if terms_tested % 100 == 0 and terms_tested > 0:
                    log("Processed {} terms".format(terms_tested))
            go_enrichment = pd.DataFrame(
                {
                    "GOTerm": term_ids,
                    "desc": term_desc,
                    "size": term_sizes,
                    "density": density_emp,
                    "density_pval": density_pvals,
                    "locality": locality_emp,
                    "locality_pval": locality_pvals,
                }
            )
            # Calculate significance
            go_enrichment["density_significant"] = go_enrichment.density_pval < alpha
            go_enrichment["locality_significant"] = go_enrichment.locality_pval < alpha
            # Calculate bonferonni
            go_enrichment["density_bonferroni"] = go_enrichment.density_pval < (
                alpha / len(go_enrichment)
            )
            go_enrichment["locality_bonferroni"] = go_enrichment.locality_pval < (
                alpha / len(go_enrichment)
            )
            # Store the GO results in a CSV
            go_enrichment.sort_values(by="density_pval", ascending=True).to_csv(
                "{}_GO.csv".format(args.out), index=False
            )
            if terms_tested == 0:
                log.warn("No GO terms met your min/max gene criteria!")
        else:
            go_enrichment = pd.read_table("{}_GO.csv".format(args.out), sep=",")

        if not path.exists("{}_GO.png".format(args.out)):
            # Convert pvals to log10
            with np.errstate(divide="ignore"):
                # When no bootstraps are more extreme than the term, the minus log p-value is infinite
                go_enrichment["density_pval"] = -1 * np.log10(
                    go_enrichment["density_pval"]
                )
                go_enrichment["locality_pval"] = -1 * np.log10(
                    go_enrichment["locality_pval"]
                )
                # Fix the infinites so they are plotted
                max_density = np.max(
                    go_enrichment["density_pval"][
                        np.isfinite(go_enrichment["density_pval"])
                    ]
                )
                max_locality = np.max(
                    go_enrichment["locality_pval"][
                        np.isfinite(go_enrichment["locality_pval"])
                    ]
                )
                go_enrichment.loc[
                    np.logical_not(np.isfinite(go_enrichment["density_pval"])),
                    "density_pval",
                ] = (max_density + 1)
                go_enrichment.loc[
                    np.logical_not(np.isfinite(go_enrichment["locality_pval"])),
                    "locality_pval",
                ] = (max_locality + 1)
            plt.clf()
            # Calculate the transparency based on the number of terms
            if len(go_enrichment) > 20:
                transparency_alpha = 0.05
            else:
                transparency_alpha = 1

            # --------------------------------------------------------------------
            # Start Plotting
            figure, axes = plt.subplots(3, 2, figsize=(12, 12))
            # -----------
            # Density
            # ----------
            log_alpha = -1 * np.log10(alpha)
            axes[0, 0].scatter(
                go_enrichment["density"],
                go_enrichment["density_pval"],
                alpha=transparency_alpha,
                color="blue",
            )
            axes[0, 0].set_xlabel("Empirical Density (Z-Score)")
            axes[0, 0].set_ylabel("Bootstraped -log10(p-value)")
            fold = sum(np.array(go_enrichment["density_pval"]) > log_alpha) / (
                alpha * len(go_enrichment)
            )
            axes[0, 0].axhline(y=-1 * np.log10(0.05), color="red")
            axes[0, 0].text(
                min(axes[0, 0].get_xlim()),
                -1 * np.log10(alpha) + 0.1,
                "{:.3g} Fold Enrichement".format(fold),
                color="red",
            )
            axes[0, 0].set_title("Density Health")
            # Plot pvalue by term size
            axes[1, 0].scatter(
                go_enrichment["size"],
                go_enrichment["density_pval"],
                alpha=transparency_alpha,
                color="blue",
            )
            axes[1, 0].set_ylabel("Bootstrapped -log10(p-value)")
            axes[1, 0].set_xlabel("Term Size")
            axes[1, 0].axhline(y=-1 * np.log10(alpha), color="red")
            axes[2, 0].scatter(
                go_enrichment["size"],
                go_enrichment["density"],
                alpha=transparency_alpha,
                color="blue",
            )
            # Plot raw density by term size
            axes[2, 0].scatter(
                go_enrichment.query(f"density_pval>{log_alpha}")["size"],
                go_enrichment.query(f"density_pval>{log_alpha}")["density"],
                alpha=transparency_alpha,
                color="r",
            )
            axes[2, 0].set_ylabel("Density")
            axes[2, 0].set_xlabel("Term Size")
            # ------------
            # Do Locality
            # ------------
            axes[0, 1].scatter(
                go_enrichment["locality"],
                go_enrichment["locality_pval"],
                alpha=transparency_alpha,
                color="blue",
            )
            axes[0, 1].set_xlabel("Empirical Locality (Residual)")
            axes[0, 1].set_ylabel("Bootstraped -log10(p-value)")
            fold = sum(np.array(go_enrichment["locality_pval"]) > log_alpha) / (
                0.05 * len(go_enrichment)
            )
            axes[0, 1].axhline(y=-1 * np.log10(0.05), color="red")
            axes[0, 1].text(
                min(axes[0, 1].get_xlim()),
                -1 * np.log10(alpha),
                "{:.3g} Fold Enrichement".format(fold),
                color="red",
            )
            axes[0, 1].set_title("Locality Health")
            axes[1, 1].scatter(
                go_enrichment["size"],
                go_enrichment["locality_pval"],
                alpha=transparency_alpha,
                color="blue",
            )
            axes[1, 1].set_xlabel("Term Size")
            axes[1, 1].set_ylabel("Bootstrapped -log10(p-value)")
            axes[1, 1].axhline(y=-1 * np.log10(alpha), color="red")
            axes[2, 1].scatter(
                go_enrichment["size"],
                go_enrichment["locality"],
                alpha=transparency_alpha,
                color="blue",
            )
            axes[2, 1].scatter(
                go_enrichment.query(f"locality_pval>{log_alpha}")["size"],
                go_enrichment.query(f"locality_pval>{log_alpha}")["locality"],
                alpha=transparency_alpha,
                color="r",
            )
            axes[2, 1].set_ylabel("Locality")
            axes[2, 1].set_xlabel("Term Size")
            # Save Figure
            plt.tight_layout()
            try:
                plt.savefig("{}_GO.png".format(args.out))
            except FutureWarning as e:
                pass
        else:
            log("Skipping GO Volcano.")
Example #47
def plot(halo_ptcls,
         agn_ptcls,
         snapnum,
         suffix='',
         xmin=0,
         xmax=np.log10(500),
         ymin=-4.5,
         ymax=4,
         yspace=20,
         nbins=50,
         overplot=False,
         subhalo_ptcls=None,
         track_agn=True,
         track_subhalo_ptcls=False):
    plt.clf()
    #get time stamp
    snap_halo = steps[snapnum].halos[halonum]
    tsnap = snap_halo.timestep.time_gyr
    #make histogram
    heatmap, xedges, yedges = np.histogram2d(np.log10(abs(halo_ptcls.g['r'])),
                                             np.log10(entropy(halo_ptcls.g)),
                                             range=[[xmin, xmax], [ymin,
                                                                   ymax]],
                                             bins=nbins,
                                             weights=halo_ptcls.g['mass'])

    X, Y = np.meshgrid(xedges, yedges)
    hist = np.log10(
        heatmap.T /
        sum(sum(heatmap.T)))  # dividing by the grand total turns the histogram into a probability
    plt.pcolormesh(X, Y, hist, vmin=-5.5, vmax=-1.5, cmap='gray')
    print "colormesh done"
    snap_halo = heatmap = X = Y = None  #clear cache
    del (halo_ptcls)
    gc.collect()

    plt.colorbar()
    if track_agn:
        agn_r = np.log10(abs(agn_ptcls.g['r']))
        plt.scatter(agn_r, np.log10(entropy(agn_ptcls.g)), color='r')
        print "AGN scatter done"
    if track_subhalo_ptcls:
        subhalo_r = np.log10(abs(subhalo_ptcls.g['r']))
        plt.scatter(subhalo_r,
                    np.log10(entropy(subhalo_ptcls.g)),
                    color='b',
                    alpha=0.1)
        print "subhalo scatter done"
    xticks = np.array([1, 10, 1e2, 5e2])
    plt.xticks(np.log10(xticks), xticks)
    plt.xlabel("R (kpc)")
    plt.ylabel(r"K (KeV cm$^2$)")
    if overplot:
        suffix += '_zoom'
        snap_entropy = entropy_mw[np.argmin(
            abs(np.array(profiletime) -
                tsnap))]  #because profiles calculated backwards from z=0
        profile_mask = np.ma.masked_invalid(snap_entropy).mask
        profile_r = (np.arange(len(snap_entropy)) + 1) / 10.
        profile_r = np.ma.masked_array(profile_r, profile_mask).compressed()
        snap_entropy = np.ma.masked_array(snap_entropy,
                                          profile_mask).compressed()
        smooth = binned_statistic(np.log10(profile_r),
                                  snap_entropy,
                                  range=(0, np.log10(500)),
                                  bins=100)
        plt.plot(smooth.bin_edges[0:-1],
                 np.log10(smooth.statistic),
                 c=cmap(0.25),
                 lw=2)
        print "overplot done"

    yticks = np.array([0.01, 0.1, 1, 10, 1e2, 1e3])
    plt.yticks(np.log10(yticks), yticks)
    plt.xlim(xmin, np.log10(5e2))
    plt.ylim(-2, np.log10(300))
    plt.text(0.2, np.log10(2000), '%0.2f Gyr' % tsnap)
    plt.savefig('halo_%d_snap_%d_rain%s.png' % (halonum, snapnum, suffix))
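
Note that the 2-D histogram above is normalized by its grand total. If the per-radius normalization hinted at by the original comment is wanted instead (each radial column summing to one), a sketch of the change:

col_totals = heatmap.T.sum(axis=0, keepdims=True)  # mass in each radius bin
hist = np.log10(heatmap.T / np.where(col_totals > 0, col_totals, 1.0))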
Example #48
def plot(request):
    loadform = CSVUploadFileForm()
    TheoN = request.session.get('TheoN')
    RTN = request.session.get('RTN')
    PlotConf = request.session.get('PlotConf')
    Category = request.session.get('Category')
    
    start = request.POST['start']
    end = request.POST['end']
    fres = float(request.POST['freq'])
    fmin = float(request.POST['fmin'])
    fmax = float(request.POST['fmax'])
    
    if 'figure' in request.session:
        figval=request.session['figure']
        fig = plt.figure(figval,figsize=(10,6))
        plt.clf()
    else:
        fig = plt.figure(figsize=(10,6))
        figval=fig.number
        request.session['figure'] = figval
    
    # CHANGE FOR LOCAL
    status,freq_tot = plot_DARM(start,end,fres, figval)
    #status,freq_tot = plot_DARM_local(start,end,fres, figval)
    if status==1: # error in obtaining the data
        if 'figure' in request.session:
            figurename = 'result'+str(figval)+'.png'
        else:
            figurename = 'test.jpg'
        context={'errormsg':freq_tot,
                 'loadform':loadform,
                 'TheoN':TheoN,
                 'RTN':RTN,
                 'figurename':figurename,
                 'PlotConf':PlotConf,
                 'Category':Category
                 }
        return render(request, 'NoiseBudgetter/index.html',context)
    
    total = np.zeros(len(freq_tot))
    status = 0
    ans = [status,total]
    
    if (TheoN != None) and (TheoN != dict()):
        ans =plot_TheoN(TheoN, fmin, fmax, freq_tot, total, figval)
        status = ans[0]
    if status == 0:
        if (RTN != None) and (RTN != dict()):
            total = ans[1]
            ans=plot_RTN(RTN,start,end,fres,freq_tot,total,figval)
            status = ans[0]
    if status == 0:
        if (Category != None) and (Category != dict()):
            total = ans[1]
            ans = plot_Category(Category,start,end,fres,freq_tot,total,figval)
            status = ans[0]
            
    if status == 0:
        total = ans[1]
        #print(ans[1])
        plt.loglog(freq_tot,total,'k--',linewidth=2.0,label='total')
        plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
        plt.subplots_adjust(right=0.7)
        plt.xlim([fmin,fmax])
        plt.title('NB from '+start+' to '+end)
        
        ymin='None'
        ymax='None'
        msg = 'plotted'
        if (request.POST['ymin']!='') and (request.POST['ymax']!=''):
            ymin = float(request.POST['ymin'])
            ymax = float(request.POST['ymax'])
            plt.ylim([ymin,ymax])
        elif (request.POST['ymin']!='') or (request.POST['ymax']!=''):
            msg = 'Please specify y-axis range for both min and max.'
    
        figurename = 'result'+str(figval)+'.png'
        fig.savefig('NoiseBudgetter/static/'+figurename)
        
        PlotConf = {'start':start,'end':end,'fres':request.POST['freq'],'fmin':request.POST['fmin'],'fmax':request.POST['fmax'],'ymin':request.POST['ymin'],'ymax':request.POST['ymax']}
        request.session['PlotConf']=PlotConf
        
        context={'errormsg':msg,
                 'loadform':loadform,
                 'TheoN':TheoN,
                 'RTN':RTN,
                 'figurename':figurename,
                 'PlotConf':PlotConf,
                 'Category':Category
                 }
        
    else:
        figurename = 'result'+str(figval)+'.png'
        context={'errormsg':ans[1],
                 'loadform':loadform,
                 'TheoN':TheoN,
                 'RTN':RTN,
                 'figurename':figurename,
                 'PlotConf':PlotConf,
                 'Category':Category
                 }
    return render(request, 'NoiseBudgetter/index.html',context)
Example #49
def scatter_calculated_vs_observed(all_calculated, all_observed, year):
    """For yearly data setts og all observed ice from regObs and all calculations on lakes where these observations
    are made, a scatter plot is made, comparing calculations vs. observations.

    :param all_calculated:
    :param all_observed:
    :param year:            [string]    Eg. '2017-18', '2016-17', ..
    """

    # Turn off interactive mode
    plt.ioff()

    scatter_plot_data = []
    for ln in all_observed.keys():
        skip_first = True

        for oi in all_observed[ln]:

            if skip_first:
                skip_first = False
            else:
                observed_draft = oi.draft_thickness
                calculated_draft = None
                for ci in all_calculated[ln]:
                    if ci.date.date() == oi.date.date():
                        calculated_draft = ci.draft_thickness

                data = ObsCalScatterData(calculated_draft, observed_draft,
                                         oi.date.date(), ln)
                scatter_plot_data.append(data)

    x = []
    y = []
    color = []
    #marker = []
    for d in scatter_plot_data:
        x.append(d.observed_draft)
        y.append(d.caluclated_draft)
        color.append(d.marker_color())
        #marker.append(d.marker())

    file_name = '{0}calculated_vs_observed {1}'.format(se.plot_folder, year)

    # Figure dimensions
    fsize = (10, 10)
    plb.figure(figsize=fsize)
    plb.clf()

    plt.scatter(x, y, c=color, alpha=0.5)
    plt.plot([-0.5, 3], [-0.5, 3], 'k--')

    plb.title('Calculated vs. observed ice thickness {0}'.format(year))
    plt.xlabel('Observed thickness [m]')
    plt.ylabel('Calculated thickness [m]')

    # Make legend
    legend_symbols = []
    legend_names = []
    for k, v in scatter_plot_data[0].legend_colors.items():
        legend_symbols.append(
            mpatches.Circle((0, 0), radius=5, facecolor=v, alpha=0.5))
        legend_names.append(k)
    plt.legend(legend_symbols, legend_names)

    # axis limits
    plb.xlim(-0.05, 1.1)
    plb.ylim(-0.05, 1.1)

    # root mean squared
    error = gm.nash_sutcliffe_efficiancy_coefficient(x, y)
    plt.gcf().text(
        0.40, 0.85,
        'N: {0}  and  Nash-Sutcliffe: {1:.3f}'.format(len(x), error))

    # Plot created text
    plt.gcf().text(0.7,
                   0.05,
                   'Figure created {0:%Y-%m-%d %H:%M}'.format(
                       dt.datetime.now()),
                   color='0.5')

    plb.savefig(file_name)
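
For reference, the Nash-Sutcliffe coefficient reported above is conventionally 1 - sum((obs - mod)**2) / sum((obs - mean(obs))**2); a minimal standalone version, independent of the gm module:

import numpy as np

def nash_sutcliffe(observed, modelled):
    # Nash-Sutcliffe model efficiency: 1 is a perfect fit, 0 matches the mean
    observed = np.asarray(observed, dtype=float)
    modelled = np.asarray(modelled, dtype=float)
    return 1.0 - ((observed - modelled) ** 2).sum() / ((observed - observed.mean()) ** 2).sum()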
Example #50
def mkcirclefigMC(simdataMC=None,
                  linelevels=[0, 0.82],
                  plotstyle='contourf',
                  Nbins=80,
                  Nsim=2000,
                  clobber=0,
                  verbose=1,
                  snanadatfile=_SNANADATFILE):
    """  Plot the results of a SNANA monte carlo simulation as a circle diagram.

    :param simdataMC:
    :param linelevels:
    :param plotstyle:
    :param Nbins:
    :param Nsim:
    :param clobber:
    :param verbose:
    :param snanadatfile:
    :return:
    """
    import snanasim
    sn = stardust.SuperNova(snanadatfile)

    if simdataMC is None:
        simdataMC = snanasim.dosimMC(sn,
                                     Nsim=Nsim,
                                     bands='XI78YJNHLOPQ',
                                     clobber=clobber,
                                     verbose=verbose)
    simIa, simIbc, simII = simdataMC

    mjdmedband = sn.MJD[np.where((sn.FLT == '7') | (sn.FLT == '8')
                                 | (sn.FLT == 'P'))]
    mjdobs = np.median(mjdmedband)

    print('Binning up MC sim for color-color diagram...')
    pl.clf()
    ax1 = pl.subplot(2, 2, 1)
    stardust.simplot.plotColorColor(simIa,
                                    '7-I',
                                    'P-N',
                                    binrange=[[-0.2, 0.8], [-0.5, 0.5]],
                                    mjdrange=[mjdobs, mjdobs],
                                    tsample=1,
                                    plotstyle=plotstyle,
                                    linelevels=linelevels,
                                    Nbins=Nbins,
                                    sidehist=False)
    #stardust.simplot._plot_colorcolor_singlesim( simIbc, '7-I','P-N', binrange=[[-0.2,0.8],[-0.5,0.5]], mjdrange=[mjdobs,mjdobs], tsample=1, plotstyle=plotstyle, linelevels=linelevels, Nbins=Nbins, sidehist=False )
    #stardust.simplot._plot_colorcolor_singlesim( simII,  '7-I','P-N', binrange=[[-0.2,0.8],[-0.5,0.5]], mjdrange=[mjdobs,mjdobs], tsample=1, plotstyle=plotstyle, linelevels=linelevels, Nbins=Nbins, sidehist=False )
    ax2 = pl.subplot(2, 2, 2)
    stardust.simplot.plotColorColor(simIa,
                                    '8-I',
                                    'P-N',
                                    binrange=[[-0.6, 0.4], [-0.5, 0.5]],
                                    mjdrange=[mjdobs, mjdobs],
                                    tsample=1,
                                    plotstyle=plotstyle,
                                    linelevels=linelevels,
                                    Nbins=Nbins,
                                    sidehist=False)
    #stardust.simplot._plot_colorcolor_singlesim( simIbc,  '8-I','P-N', binrange=[[-0.6,0.4],[-0.5,0.5]], mjdrange=[mjdobs,mjdobs], tsample=1, plotstyle=plotstyle, linelevels=linelevels, Nbins=Nbins, sidehist=False )
    #stardust.simplot._plot_colorcolor_singlesim( simII,   '8-I','P-N', binrange=[[-0.6,0.4],[-0.5,0.5]], mjdrange=[mjdobs,mjdobs], tsample=1, plotstyle=plotstyle, linelevels=linelevels, Nbins=Nbins, sidehist=False )
    ax3 = pl.subplot(2, 2, 3)
    stardust.simplot.plotColorColor(simIa,
                                    '7-I',
                                    '8-I',
                                    binrange=[[-0.2, 0.8], [-0.6, 0.4]],
                                    mjdrange=[mjdobs, mjdobs],
                                    tsample=1,
                                    plotstyle=plotstyle,
                                    linelevels=linelevels,
                                    Nbins=Nbins,
                                    sidehist=False)
    #stardust.simplot._plot_colorcolor_singlesim( simIbc,  '7-I','8-I', binrange=[[-0.2,0.8],[-0.6,0.4]], mjdrange=[mjdobs,mjdobs], tsample=1, plotstyle=plotstyle, linelevels=linelevels, Nbins=Nbins, sidehist=False )
    #stardust.simplot._plot_colorcolor_singlesim( simII,   '7-I','8-I', binrange=[[-0.2,0.8],[-0.6,0.4]], mjdrange=[mjdobs,mjdobs], tsample=1, plotstyle=plotstyle, linelevels=linelevels, Nbins=Nbins, sidehist=False )

    pl.draw()
    return simdataMC
def background_adjust_model(settings,
                            bkg_stack,
                            bkg_stack_num,
                            seed=0,
                            threading=True):
    offset = settings['detector'].get('train_offset', 0)
    limit = settings['detector'].get('train_limit')
    num_mixtures = settings['detector']['num_mixtures']
    assert limit is not None, "Must specify limit in the settings file"
    duplicates = settings['detector'].get('duplicates', 1)
    files = sorted(glob.glob(
        settings['detector']['train_dir']))[offset:offset + limit]

    #try:
    #    detector = gv.Detector.load(settings['detector']['file'])
    #except KeyError:
    #    raise Exception("Need to train the model first")

    # Create accumulates for each mixture component
    # TODO: Temporary until multicomp
    #counts = np.zeros_like(detector.kernel_templates)

    #num_files = len(files)
    #num_duplicates = settings['detector'].get('duplicate', 1)

    # Create several random states, so it's easier to measure
    # the influence of certain features

    # Setup unspread bedges settings
    #X_pad_size = padded_theta.shape[1:3]

    #for fn in files:
    #counts += _process_file(settings, bkg_stack, bkg_stack_num, fn)

    # Train a mixture model to get a clustering of the angles of the object

    descriptor = gv.load_descriptor(settings)

    if 0:
        detector = gv.BernoulliDetector(num_mixtures, descriptor,
                                        settings['detector'])
        detector.train_from_images(files)

        plt.clf()
        ag.plot.images(detector.support)
        plt.savefig('output/components.png')

        comps = detector.mixture.mixture_components()
        each_mix_N = np.bincount(comps, minlength=num_mixtures)

    comps = np.zeros(len(files))

    argses = [(settings, bkg_stack, bkg_stack_num, files[i], comps[i])
              for i in xrange(len(files))]

    # Iterate images
    all_counts = gv.parallel.imap(_process_file_star, argses)

    # Can dot this instead:
    counts = sum(all_counts)

    # Divide accmulate to get new distribution
    #counts /= num_files

    # Create a new model, with this distribution
    #new_detector = detector.copy()

    #new_detector.kernel_templates = counts
    #new_detector.support = None
    #new_detector.use_alpha = False

    # Return model
    #return new_detector
    # NOTE: each_mix_N and detector are only bound inside the disabled "if 0:"
    # block above, so this return raises NameError as the function stands.
    return counts, each_mix_N * duplicates, detector.support, detector.mixture
Example #52
def main():
    # Create images folder
    Path("Imgs/STEAD").mkdir(parents=True, exist_ok=True)

    # STEAD dataset path
    st = '../Data/STEAD/Train_data.hdf5'

    # Sampling frequency
    fs = 100

    # Number of traces to plot
    n = 4

    # Traces to plot
    trtp = []

    with h5py.File(st, 'r') as h5_file:

        # Seismic traces group
        grp = h5_file['earthquake']['local']

        # Traces to plot ids
        # trtp_ids = [0, 1, 2, 3]

        # Init rng
        rng = default_rng()

        # Traces to plot numbers
        trtp_ids = rng.choice(len(grp), size=n, replace=False)
        trtp_ids.sort()

        for idx, dts in enumerate(grp):
            if idx in trtp_ids:
                trtp.append(grp[dts][:, 0])

    # Data len
    N = len(trtp[0])

    # Time axis for signal plot
    t_ax = np.arange(N) / fs

    # Frequency axis for FFT plot
    xf = np.linspace(-fs / 2.0, fs / 2.0 - 1 / fs, N)

    # Figure to plot
    pl.figure()

    # For trace in traces to print
    for idx, trace in enumerate(trtp):
        yf = sfft.fftshift(sfft.fft(trace))

        gs = gridspec.GridSpec(2, 2)

        pl.clf()
        pl.subplot(gs[0, :])
        pl.plot(t_ax, trace)
        pl.title(f'STEAD trace and spectrum #{trtp_ids[idx]}')
        pl.xlabel('Time [s]')
        pl.ylabel('Amplitude [-]')
        pl.grid(True)

        pl.subplot(gs[1, 0])
        pl.plot(xf, np.abs(yf) / np.max(np.abs(yf)))
        pl.xlabel('Frequency [Hz]')
        pl.ylabel('Amplitude [-]')
        pl.grid(True)

        pl.subplot(gs[1, 1])
        pl.plot(xf, np.abs(yf) / np.max(np.abs(yf)))
        pl.xlim(-25, 25)
        pl.xlabel('Frequency [Hz]')
        pl.ylabel('Amplitude [-]')
        pl.grid(True)
        pl.tight_layout()
        pl.savefig(f'Imgs/STEAD/{trtp_ids[idx]}.png')
Example #53
freq = np.array([
    1.337, 1.378, 1.4, 1.417, 1.438, 1.453, 1.462, 1.477, 1.487, 1.493, 1.497,
    1.5, 1.513, 1.52, 1.53, 1.54, 1.55, 1.567, 1.605, 1.628, 1.658
])
acc = np.array([
    .68, .90, 1.15, 1.50, 2.2, 3.05, 4.00, 7.00, 8.60, 8.15, 7.60, 7.1, 5.4,
    4.7, 3.8, 3.4, 3.1, 2.6, 1.95, 1.7, 1.5
])


def func(x, a, b, c, d):
    return a * np.power(x, 3) + b * np.power(x, 2) + c * x + d


# Dense grid so the fitted curve plots smoothly
xdata = np.linspace(1.3, 1.7, 40)

popt, pcov = curve_fit(func, freq, acc)
print popt
print pcov

# Plotting
plt.clf()  # Clearing any previous plots
plt.plot(xdata,
         func(xdata, *popt),
         'r-',
         label='fit: a=%5.3f, b=%5.3f, c=%5.3f, d=%5.3f' % tuple(popt))
plt.plot(freq, acc, 'o', label='data')
plt.legend()
plt.xlabel('Excitation Frequency')
plt.ylabel('Acceleration (e-3 g)')
plt.grid()
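
A general scipy.optimize.curve_fit note, not specific to this snippet: the call above relies on the default all-ones initial guess, and the covariance diagonal gives one-sigma parameter uncertainties:

popt, pcov = curve_fit(func, freq, acc, p0=[1.0, 1.0, 1.0, 1.0])
perr = np.sqrt(np.diag(pcov))  # one standard deviation per fitted parameter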
Example #54
def _process_file(settings, bkg_stack, bkg_stack_num, fn, mixcomp):
    ag.info("Processing file", fn)
    seed = np.abs(hash(fn) % 123124)
    descriptor_name = settings['detector']['descriptor']
    img_size = settings['detector']['image_size']
    part_size = settings[descriptor_name]['part_size']
    psize = settings['detector']['subsample_size']

    # The 4 is for the edge border that falls off
    #orig_sh = (img_size[0] - part_size[0] - 4 + 1, img_size[1] - part_size[1] - 4 + 1)
    orig_sh = img_size
    sh = gv.sub.subsample_size(np.ones(orig_sh), psize)

    # We need the descriptor to generate and manipulate images
    descriptor = gv.load_descriptor(settings)

    counts = np.zeros((settings['detector']['num_mixtures'], sh[0], sh[1],
                       descriptor.num_parts + 1, descriptor.num_parts),
                      dtype=np.uint16)

    prnds = [np.random.RandomState(seed + i) for i in xrange(5)]

    # Binarize support and Extract alpha
    #color_img, alpha = gv.img.load_image_binarized_alpha(fn)
    color_img = gv.img.load_image(fn)

    from skimage.transform import pyramid_reduce, pyramid_expand
    f = color_img.shape[0] / settings['detector']['image_size'][0]
    if f > 1:
        color_img = pyramid_reduce(color_img, downscale=f)
    elif f < 1:
        color_img = pyramid_expand(color_img, upscale=1 / f)

    alpha = color_img[..., 3]
    img = gv.img.asgray(color_img)

    # Resize it
    # TODO: This only looks at the first axis

    assert img.shape == settings['detector'][
        'image_size'], "Target size not achieved: {0} != {1}".format(
            img.shape, settings['detector']['image_size'])

    # Settings
    bsettings = settings['edges'].copy()
    radius = bsettings['radius']
    bsettings['radius'] = 0

    #offsets = gv.sub.subsample_offset_shape(sh, psize)

    #locations0 = xrange(offsets[0], sh[0], psize[0])
    #locations1 = xrange(offsets[1], sh[1], psize[1])
    locations0 = xrange(sh[0])
    locations1 = xrange(sh[1])
    #locations0 = xrange(10-4, 10+5)
    #locations1 = xrange(10-4, 10+5)

    #locations0 = xrange(10, 11)
    #locations1 = xrange(10, 11)

    #padded_theta = descriptor.unspread_parts_padded

    #pad = 10
    pad = 5
    size = settings[descriptor_name]['part_size']
    X_pad_size = (size[0] + pad * 2, size[1] + pad * 2)

    img_pad = ag.util.zeropad(img, pad)

    alpha_pad = ag.util.zeropad(alpha, pad)

    # Iterate every duplicate

    dups = settings['detector'].get('duplicates', 1)

    bkgs = np.empty(((descriptor.num_parts + 1) * dups, ) + X_pad_size)
    #cads = np.empty((descriptor.num_parts,) + X_pad_size)
    #alphas = np.empty((descriptor.num_parts,) + X_pad_size, dtype=np.bool)

    radii = settings['detector']['spread_radii']
    psize = settings['detector']['subsample_size']
    cb = settings['detector'].get('crop_border')
    sett = dict(spread_radii=radii, subsample_size=psize, crop_border=cb)

    plt.clf()
    plt.imshow(img)
    plt.savefig('output/img.png')

    if 0:
        # NEW{
        totfeats = np.zeros(sh + (descriptor.num_parts, ) * 2)
        for f in xrange(descriptor.num_parts):
            num = bkg_stack_num[f]

            for d in xrange(dups):
                feats = np.zeros(sh + (descriptor.num_parts, ), dtype=np.uint8)

                for i, j in itr.product(locations0, locations1):
                    x = i * psize[0]
                    y = j * psize[1]  # was i * psize[1]; j indexes the second axis

                    bkg_i = prnds[4].randint(num)
                    bkg = bkg_stack[f, bkg_i]

                    selection = [
                        slice(x, x + X_pad_size[0]),
                        slice(y, y + X_pad_size[1])
                    ]
                    #X_pad = edges_pad[selection].copy()
                    patch = img_pad[selection]
                    alpha_patch = alpha_pad[selection]

                    #patch = np.expand_dims(patch, 0)
                    #alpha_patch = np.expand_dims(alpha_patch, 0)

                    # TODO: Which one?
                    #img_with_bkg = patch + bkg * (1 - alpha_patch)
                    img_with_bkg = patch * alpha_patch + bkg * (1 -
                                                                alpha_patch)

                    edges_pads = ag.features.bedges(img_with_bkg, **bsettings)
                    X_pad_spreads = ag.features.bspread(
                        edges_pads, spread=bsettings['spread'], radius=radius)

                    padding = pad - 2
                    X_spreads = X_pad_spreads[padding:-padding:,
                                              padding:-padding]

                    partprobs = ag.features.code_parts(
                        X_spreads, descriptor._log_parts,
                        descriptor._log_invparts,
                        descriptor.settings['threshold'],
                        descriptor.settings['patch_frame'])

                    part = partprobs.argmax()
                    if part > 0:
                        feats[i, j, part - 1] = 1

                # Now spread the parts
                feats = ag.features.bspread(feats, spread='box', radius=2)

                totfeats[:, :, f] += feats

        # }

        kernels = totfeats[:, :, 0].astype(
            np.float32) / (descriptor.num_parts * dups)

        # Subsample kernels
        sub_kernels = gv.sub.subsample(kernels, psize, skip_first_axis=False)

        np.save('tmp2.npy', sub_kernels)
        print 'saved tmp2.npy'
        import sys
        sys.exit(0)

    #ag.info("Iteration {0}/{1}".format(loop+1, num_duplicates))
    #ag.info("Iteration")
    for i, j in itr.product(locations0, locations1):
        x = i * psize[0]
        y = j * psize[1]  # was i * psize[1]; j indexes the second axis

        print 'processing', i, j
        selection = [slice(x, x + X_pad_size[0]), slice(y, y + X_pad_size[1])]
        #X_pad = edges_pad[selection].copy()
        patch = img_pad[selection]
        alpha_patch = alpha_pad[selection]

        patch = np.expand_dims(patch, 0)
        alpha_patch = np.expand_dims(alpha_patch, 0)

        for f in xrange(descriptor.num_parts + 1):
            num = bkg_stack_num[f]

            for d in xrange(dups):
                bkg_i = prnds[4].randint(num)
                bkgs[f * dups + d] = bkg_stack[f, bkg_i]

        img_with_bkgs = patch * alpha_patch + bkgs * (1 - alpha_patch)

        if 0:
            edges_pads = ag.features.bedges(img_with_bkgs, **bsettings)
            X_pad_spreads = ag.features.bspread(edges_pads,
                                                spread=bsettings['spread'],
                                                radius=radius)

            padding = pad - 2
            X_spreads = X_pad_spreads[:, padding:-padding:, padding:-padding]

        #partprobs = ag.features.code_parts_many(X_spreads, descriptor._log_parts, descriptor._log_invparts,
        #descriptor.settings['threshold'], descriptor.settings['patch_frame'])

        #parts = partprobs.argmax(axis=-1)

        parts = np.asarray([
            descriptor.extract_features(im, settings=sett)[0, 0]
            for im in img_with_bkgs
        ])

        for f in xrange(descriptor.num_parts + 1):
            hist = np.bincount(parts[f * dups:(f + 1) * dups].ravel(),
                               minlength=descriptor.num_parts + 1)
            counts[mixcomp, i, j, f] += hist[1:]

        #import pdb; pdb.set_trace()

        #for f in xrange(descriptor.num_parts):
        #    for d in xrange(dups):
        #        # Code parts
        #        #parts = descriptor.extract_parts(X_spreads[f*dups+d].astype(np.uint8))


#
#                f_plus = parts[f*dups+d]
#                if f_plus > 0:
#tau = self.settings.get('tau')
#if self.settings.get('tau'):
#parts = partprobs.argmax(axis=-1)

# Accumulate and return
#                    counts[mixcomp,i,j,f,f_plus-1] += 1#parts[0,0]

    if 0:
        kernels = counts[:, :, :, 0].astype(
            np.float32) / (descriptor.num_parts * dups)

        import pdb
        pdb.set_trace()

        radii = (2, 2)

        aa_log = np.log(1 - kernels)
        aa_log = ag.util.zeropad(aa_log, (0, radii[0], radii[1], 0))

        integral_aa_log = aa_log.cumsum(1).cumsum(2)

        offsets = gv.sub.subsample_offset(kernels[0], psize)

        if 1:
            # Fix kernels
            istep = 2 * radii[0]
            jstep = 2 * radii[1]
            sh = kernels.shape[1:3]
            for mixcomp in xrange(1):
                # Note, we are going in strides of psize, given a certain offset, since
                # we will be subsampling anyway, so we don't need to do the rest.
                for i in xrange(offsets[0], sh[0], psize[0]):
                    for j in xrange(offsets[1], sh[1], psize[1]):
                        p = gv.img.integrate(integral_aa_log[mixcomp], i, j,
                                             i + istep, j + jstep)
                        kernels[mixcomp, i, j] = 1 - np.exp(p)

        # Subsample kernels
        sub_kernels = gv.sub.subsample(kernels, psize, skip_first_axis=True)

        np.save('tmp.npy', sub_kernels)
        print 'saved tmp.npy'
        import sys
        sys.exit(0)

    if 0:
        for f in xrange(descriptor.num_parts):

            # Pick only one background for this part and file
            num = bkg_stack_num[f]

            # Assumes num > 0

            bkg_i = prnds[4].randint(num)

            bkgmap = bkg_stack[f, bkg_i]

            # Composite
            img_with_bkg = gv.img.composite(patch, bkgmap, alpha_patch)

            # Retrieve unspread edges (with a given background gray level)
            edges_pad = ag.features.bedges(img_with_bkg, **bsettings)

            # Pad the edges
            #edges_pad = ag.util.zeropad(edges, (pad, pad, 0))

            # Do spreading
            X_pad_spread = ag.features.bspread(edges_pad,
                                               spread=bsettings['spread'],
                                               radius=radius)

            # De-pad
            padding = pad - 2
            X_spread = X_pad_spread[padding:-padding, padding:-padding]

            # Code parts
            parts = descriptor.extract_parts(X_spread.astype(np.uint8))

            # Accumulate and return
            counts[mixcomp, i, j, f] += parts[0, 0]

    # Translate counts to spread counts (since we're assuming independence of samples within one CAD image)

    return counts
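The accumulation above leans on np.bincount with minlength, so every part index gets a bin even when it never occurs, and bin 0 (the background code) is dropped afterwards. A minimal standalone illustration of that idiom:

import numpy as np

num_parts = 4
codes = np.array([0, 2, 2, 1, 0, 4])  # 0 = background, 1..num_parts = coded parts
hist = np.bincount(codes.ravel(), minlength=num_parts + 1)
part_counts = hist[1:]  # drop the background bin
print(part_counts)  # -> [1 2 0 1]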
Example #55
def calc_disparr_ctrl_plot(D, timestamps, oflow_source_data,
                           oflow_source_data_forecast, cfg_set):
    """Plot and save two frames as input of LucasKanade and the advected field, as well as plot the displacement field.
   
    Parameters
    ----------
    
    cfg_set : dict
        Basic variables defined in input_NOSTRADAMUS_ANN.py
    
    timestamps : datetime object
        Timestamps of optical flow input fields.
    
    D : array-like
        Displacement field.
    
    oflow_source_data : array-like
        Optical flow input fields.
        
    oflow_source_data_forecast : array-like
        Advected optical flow input field.
    """

    plt.subplot(1, 2, 1)
    plt.imshow(D[0, :, :])
    plt.title('X component of displacement field')

    plt.subplot(1, 2, 2)
    plt.imshow(D[1, :, :])
    plt.title('Y component of displacement field')
    #plt.show()

    # Plot two frames for UV-derivation and advected frame
    for i in range(oflow_source_data.shape[0] + 1):
        plt.clf()
        if i < oflow_source_data.shape[0]:
            # Plot last observed rainfields
            st.plt.plot_precip_field(
                oflow_source_data[i, :, :],
                None,
                units="mmhr",
                colorscale=cfg_set["colorscale"],
                title=timestamps[i].strftime("%Y-%m-%d %H:%M"),
                colorbar=True)

            figname = "%s%s_%s_simple_advection_%02d_obs.png" % (
                cfg_set["output_path"],
                timestamps[i].strftime("%Y%m%d%H%M"),
                cfg_set["oflow_source"], i)
            plt.savefig(figname)
            print(figname, 'saved.')
        else:
            # Plot nowcast
            st.plt.plot_precip_field(
                oflow_source_data_forecast[
                    i - oflow_source_data.shape[0], :, :],
                None,
                units="mmhr",
                title="%s +%02d min" %
                (timestamps[-1].strftime("%Y-%m-%d %H:%M"),
                 (1 + i - oflow_source_data.shape[0]) *
                 cfg_set["timestep"]),
                colorscale=cfg_set["colorscale"],
                colorbar=True)
            figname = "%s%s_%s_simple_advection_%02d_nwc.png" % (
                cfg_set["output_path"],
                timestamps[-1].strftime("%Y%m%d%H%M"),
                cfg_set["oflow_source"], i)
            plt.savefig(figname)
            print(figname, 'saved.')
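A hedged usage sketch for calc_disparr_ctrl_plot; the array shapes are read off the function body, and the cfg_set values (colorscale "MeteoSwiss", source "RZC", a 5-minute timestep, the current directory as output path) are placeholder assumptions, not values the source confirms:

import datetime
import numpy as np

D = np.zeros((2, 100, 100))            # (component, y, x) displacement field
obs = np.random.rand(3, 100, 100)      # observed input fields (t, y, x)
fcst = np.random.rand(1, 100, 100)     # one advected/forecast field
timestamps = [datetime.datetime(2019, 1, 1, 12, 0) + datetime.timedelta(minutes=5 * t)
              for t in range(obs.shape[0])]
cfg_set = {"colorscale": "MeteoSwiss", "output_path": "./",
           "oflow_source": "RZC", "timestep": 5}

calc_disparr_ctrl_plot(D, timestamps, obs, fcst, cfg_set)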
    def plot_options(self, sess, coord, saver):
        eigenvectors_path = os.path.join(
            os.path.join(self.config.stage_logdir, "models"),
            "eigenvectors.npy")
        eigenvalues_path = os.path.join(
            os.path.join(self.config.stage_logdir, "models"),
            "eigenvalues.npy")
        eigenvectors = np.load(eigenvectors_path)
        eigenvalues = np.load(eigenvalues_path)
        for k in ["poz", "neg"]:
            for option in range(len(eigenvalues)):
                # eigenvalue = eigenvalues[option]
                eigenvector = eigenvectors[
                    option] if k == "poz" else -eigenvectors[option]
                prefix = str(option) + '_' + k + "_"

                plt.clf()

                with sess.as_default(), sess.graph.as_default():
                    for idx in range(self.nb_states):
                        dx = 0
                        dy = 0
                        d = False
                        s, i, j = self.env.get_state(idx)

                        if not self.env.not_wall(i, j):
                            plt.gca().add_patch(
                                patches.Rectangle(
                                    (j, self.config.input_size[0] - i -
                                     1),  # (x,y)
                                    1.0,  # width
                                    1.0,  # height
                                    facecolor="gray"))
                            continue
                        # Image.fromarray(np.asarray(scipy.misc.imresize(s, [512, 512], interp='nearest'), np.uint8)).show()
                        # feed_dict = {self.orig_net.observation: np.stack([s])}
                        # fi = sess.run(self.orig_net.fi,
                        #               feed_dict=feed_dict)[0]
                        a = self.option_policy_evaluation_eval(s, sess)
                        if a == self.action_size:
                            circle = plt.Circle(
                                (j + 0.5,
                                 self.config.input_size[0] - i + 0.5 - 1),
                                0.025,
                                color='k')
                            plt.gca().add_artist(circle)
                            continue

                        if a == 0:  # up
                            dy = 0.35
                        elif a == 1:  # right
                            dx = 0.35
                        elif a == 2:  # down
                            dy = -0.35
                        elif a == 3:  # left
                            dx = -0.35

                        plt.arrow(j + 0.5,
                                  self.config.input_size[0] - i + 0.5 - 1,
                                  dx,
                                  dy,
                                  head_width=0.05,
                                  head_length=0.05,
                                  fc='k',
                                  ec='k')

                    plt.xlim([0, self.config.input_size[1]])
                    plt.ylim([0, self.config.input_size[0]])

                    for i in range(self.config.input_size[1]):
                        plt.axvline(i, color='k', linestyle=':')
                    plt.axvline(self.config.input_size[1],
                                color='k',
                                linestyle=':')

                    for j in range(self.config.input_size[0]):
                        plt.axhline(j, color='k', linestyle=':')
                    plt.axhline(self.config.input_size[0],
                                color='k',
                                linestyle=':')

                    plt.savefig(
                        os.path.join(
                            self.summary_path,
                            "SuccessorFeatures_" + prefix + 'policy.png'))
                    plt.close()
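plot_options draws its arrows on a grid whose array rows count downward while matplotlib's y axis counts upward, hence the recurring input_size[0] - i - 1 flip. A minimal sketch of that transform, with a hypothetical grid height H standing in for config.input_size[0]:

import matplotlib.pyplot as plt

H = 5  # hypothetical grid height (number of rows)

def cell_to_xy(i, j):
    # Map (row, col) with the origin at the top-left to matplotlib
    # (x, y) coordinates with the origin at the bottom-left.
    return j, H - i - 1

x, y = cell_to_xy(0, 0)  # top-left grid cell
plt.arrow(x + 0.5, y + 0.5, 0.35, 0.0,  # a 'right' action arrow from the cell centre
          head_width=0.05, head_length=0.05, fc='k', ec='k')
plt.xlim([0, H])
plt.ylim([0, H])
plt.close()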
    def viz_options(self, sess, coord, saver):
        with sess.as_default(), sess.graph.as_default():
            self.sess = sess
            self.saver = saver
            folder_path = os.path.join(
                os.path.join(self.config.stage_logdir, "summaries"),
                "policies")
            tf.gfile.MakeDirs(folder_path)
            matrix_path = os.path.join(
                os.path.join(self.config.stage_logdir, "models"), "matrix.npy")
            self.matrix_sf = np.load(matrix_path)
            u, s, v = np.linalg.svd(self.matrix_sf)
            eigenvalues = s[1:1 + self.nb_options]
            eigenvectors = v[1:1 + self.nb_options]
            plt.clf()

            with sess.as_default(), sess.graph.as_default():
                for idx in range(self.nb_states):
                    dx = 0
                    dy = 0
                    d = False
                    s, i, j = self.env.get_state(idx)
                    if not self.env.not_wall(i, j):
                        plt.gca().add_patch(
                            patches.Rectangle(
                                (j,
                                 self.config.input_size[0] - i - 1),  # (x,y)
                                1.0,  # width
                                1.0,  # height
                                facecolor="gray"))
                        continue

                    feed_dict = {self.local_network.observation: np.stack([s])}
                    max_q_val, q_vals, option, primitive_action, options, o_term = self.sess.run(
                        [
                            self.local_network.max_q_val,
                            self.local_network.q_val,
                            self.local_network.max_options,
                            self.local_network.primitive_action,
                            self.local_network.options,
                            self.local_network.termination
                        ],
                        feed_dict=feed_dict)
                    max_q_val = max_q_val[0]
                    # q_vals = q_vals[0]

                    o, primitive_action = option[0], primitive_action[0]
                    # q_val = q_vals[o]
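                    # Option indices >= nb_options denote primitive actions; recompute the flag from the chosen index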
                    primitive_action = o >= self.config.nb_options
                    if primitive_action:
                        a = o - self.nb_options
                        o_term = True
                    else:
                        pi = options[0, o]
                        # Draw the action index directly; recovering it via
                        # argmax(pi == action) misfires when probabilities tie.
                        a = np.random.choice(len(pi), p=pi)
                        o_term = o_term[0, o] > np.random.uniform()

                    if a == 0:  # up
                        dy = 0.35
                    elif a == 1:  # right
                        dx = 0.35
                    elif a == 2:  # down
                        dy = -0.35
                    elif a == 3:  # left
                        dx = -0.35

                    if o_term and not primitive_action:  # termination
                        circle = plt.Circle(
                            (j + 0.5, self.config.input_size[0] - i + 0.5 - 1),
                            0.025,
                            color='k')  # primitive_action is always False in this branch
                        plt.gca().add_artist(circle)
                        continue
                    plt.text(j,
                             self.config.input_size[0] - i - 1,
                             str(o),
                             color='r' if primitive_action else 'b',
                             fontsize=8)
                    plt.text(j + 0.5,
                             self.config.input_size[0] - i - 1,
                             '{0:.2f}'.format(max_q_val),
                             fontsize=8)

                    plt.arrow(j + 0.5,
                              self.config.input_size[0] - i + 0.5 - 1,
                              dx,
                              dy,
                              head_width=0.05,
                              head_length=0.05,
                              fc='r' if primitive_action else 'k',
                              ec='r' if primitive_action else 'k')

                plt.xlim([0, self.config.input_size[1]])
                plt.ylim([0, self.config.input_size[0]])

                for i in range(self.config.input_size[1]):
                    plt.axvline(i, color='k', linestyle=':')
                plt.axvline(self.config.input_size[1],
                            color='k',
                            linestyle=':')

                for j in range(self.config.input_size[0]):
                    plt.axhline(j, color='k', linestyle=':')
                plt.axhline(self.config.input_size[0],
                            color='k',
                            linestyle=':')

                plt.savefig(
                    os.path.join(self.summary_path, 'Training_policy.png'))
                plt.close()
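viz_options derives its option directions from an SVD of the saved successor-feature matrix, skipping the first (dominant) component. A standalone sketch of that step, with a random stand-in matrix instead of a file loaded from disk:

import numpy as np

nb_options = 4                         # stands in for self.nb_options
matrix_sf = np.random.randn(64, 16)    # stand-in for the saved matrix.npy

u, s, v = np.linalg.svd(matrix_sf)
eigenvalues = s[1:1 + nb_options]      # singular values 2..nb_options+1
eigenvectors = v[1:1 + nb_options]     # one direction (row of v) per option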
Example #58
def sarima(lc1,
           orderin=(1, 1, 0),
           seasonal_order='auto',
           trend='c',
           enforce_invertibility=False,
           alpha_confidence=0.32,
           time_forecast=10.0,
           diagnostic_plot=''):

    if isinstance(lc1, np.ndarray):
        time = lc1[:, 0]
        signal = lc1[:, 1]
        print('sarima input is numpy array')
    elif isinstance(lc1, pd.DataFrame):
        print('sarima input is a data frame')
        a = df2lc.convert_df_2_lightcurve(lc1,
                                          reftime='auto',
                                          noise=0.1,
                                          normalise=0)
        print(a)
        time = a[:, 0]
        signal = a[:, 1]

    print(type(lc1))

    dt = np.mean(time[1:] - time[:-1])
    nforecast = int(time_forecast / dt)
    ntime = np.shape(time)[0]
    tfclo = time[ntime - 1]
    tfchi = tfclo + nforecast * dt
    idx_forecast_lo = ntime - 1
    idx_forecast_hi = ntime + nforecast

    #set up the model parameters
    if (seasonal_order == 'auto'):
        so_final = int(0.70 * ntime)
        so = (0, 1, 0, so_final)
    else:
        so = seasonal_order

    #fit the sarima model
    model = sm.tsa.statespace.SARIMAX(endog=signal,
                                      order=orderin,
                                      seasonal_order=so,
                                      trend=trend,
                                      enforce_invertibility=enforce_invertibility)

    for i in range(ntime):
        print(i, time[i], signal[i])
    print('idx fc ', idx_forecast_lo, idx_forecast_hi)
    results = model.fit()
    pred = results.get_prediction(start=ntime, end=idx_forecast_hi)
    ps = pred.summary_frame(alpha=alpha_confidence)
    pslo = np.array(ps['mean_ci_lower'])
    pshi = np.array(ps['mean_ci_upper'])
    npred = np.shape(pslo)[0]

    #output the forecast time series
    ymean = np.array(ps['mean'])
    ylo = np.array(ps['mean_ci_lower'])
    yhi = np.array(ps['mean_ci_upper'])
    nym = np.shape(ymean)[0]
    xmod = np.linspace(tfclo, tfchi, nym)

    #plot the time series (can also use dweek.plot() for a simple option, but with less customisation)
    if (diagnostic_plot != ''):
        x = time
        y = signal
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        ax1.plot(x, y, label='data')
        ax1.plot(xmod, ymean, label='forecast')
        ax1.set_title('SARIMA forecast')
        ax1.fill_between(xmod, ylo, yhi, alpha=0.3, label='uncertainty')
        ax1.set_xlabel('Time')
        ax1.set_ylabel('light curve')
        plt.legend(fontsize='xx-small')
        if (diagnostic_plot != 'show'):
            plt.savefig(diagnostic_plot)
            plt.clf()
        else:
            plt.show()

    #if the input was a data frame convert the time axis back into datetime format
    if isinstance(lc1, pd.DataFrame):
        date_start = lc1.values[0, 0]
        date_final = lc1.values[-1, 0]
        time_out = np.array([
            date_final + datetime.timedelta(days=xm - xmod[0]) for xm in xmod
        ])
    else:
        time_out = xmod

    #compute the model Mean Absolute Percentage Error (MAPE) using cross validation
    #extract a smaller time series (smaller by length equal to the forecast)
    #use cross validation to work out the MAPE.
    frac_check = 0.9
    id_check = int(frac_check * ntime)
    model_check = sm.tsa.statespace.SARIMAX(endog=signal[:id_check],
                                            order=orderin,
                                            seasonal_order=so,
                                            trend=trend,
                                            enforce_invertibility=enforce_invertibility,
                                            enforce_stationarity=False)
    results_check = model_check.fit()
    pred_check = results_check.get_prediction(start=0, end=ntime - 1)
    ps_check = pred_check.summary_frame(alpha=alpha_confidence)
    y_check = np.array(ps_check['mean'])

    print(id_check, ntime)
    print(y_check)
    print(signal[id_check:])

    mape = np.sum(
        np.abs(y_check[id_check:] - signal[id_check:]) /
        signal[id_check:]) / (ntime - id_check)

    #import matplotlib.pylab as plt

    #plt.clf()
    #plt.plot(signal)
    #plt.plot(y_check)
    #plt.show()
    #print(mape,ntime-id_check)
    #input()

    return (time_out, ymean, ylo, yhi, mape)
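A hedged usage sketch for sarima via its two-column ndarray path; the synthetic light curve is arbitrary, and note that with seasonal_order='auto' the implied seasonal period of int(0.7 * ntime) can make the fit slow on longer series:

import numpy as np

# Synthetic light curve: column 0 = time, column 1 = signal
t = np.arange(0, 50.0, 0.5)
y = np.sin(2 * np.pi * t / 10.0) + 0.1 * np.random.randn(t.size)
lc = np.column_stack([t, y])

time_out, ymean, ylo, yhi, mape = sarima(lc, orderin=(1, 1, 0),
                                         time_forecast=10.0,
                                         diagnostic_plot='')
print('forecast MAPE estimate:', mape)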
Example #59
    def plot_hist(self, par_list):
        """ Plots histograms for samples of parameters, model results, residuals, and test data

        :param par_list: list of strings with parameter names
        :return: None; creates and saves plots in main class output directory
        """
        print("Plotting histograms and graphs...")
        outdir = self.outmcmc + 'histograms/'
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        # plotting histograms for parameters
        for i in range(self.ndim):
            plt.figure(i)
            dist = self.samples_i[:, i]
            plt.hist(dist, 30, facecolor='blue', alpha=0.5)
            if i == 0: plt.title('Alpha')
            if i == 1: plt.title('Sigma')
            if (i > 1) and (i < self.ndim_blr + 2): plt.title(par_list[i])
            #if i >= self.ndim_blr + 2:
            #    plt.title('GP par: '+str(i-int(self.ndim_blr) - 1))
            plt.axvline(self.params_fit[i], c='r', ls='-')
            plt.axvline(self.params_mean[i], c='b', ls='-')
            plt.axvline(self.params_mean[i] + self.errors_fit[i],
                        c='b',
                        ls='--')
            plt.axvline(self.params_mean[i] - self.errors_fit[i],
                        c='b',
                        ls='--')
            plt.axvline(0., c='k', ls='--')
            plt.draw()
            plt.savefig(outdir + 'hist_' + par_list[i] + '.png')
        # plot observed vs modeled
        xmin = np.min(np.concatenate([self.y, self.y_test]))
        xmax = np.max(np.concatenate([self.y, self.y_test]))
        xdiff = xmax - xmin
        ymin = np.min(np.concatenate([self.y_model, self.y_model_test]))
        ymax = np.max(np.concatenate([self.y_model, self.y_model_test]))
        ydiff = ymax - ymin
        x_range = [xmin - xdiff * 0.2, xmax + xdiff * 0.2]
        y_range = [ymin - ydiff * 0.2, ymax + ydiff * 0.2]
        plt.clf()
        plt.plot(x_range, y_range, '.', alpha=0.0)
        plt.plot(self.y, self.y_model, 'o', c='b', label='Train', alpha=0.5)
        plt.plot(self.y_test,
                 self.y_model_test,
                 'o',
                 c='r',
                 label='Test',
                 alpha=0.5)
        plt.plot(self.y_model, self.y_model, 'k--', label='Perfect Prediction')
        plt.xlabel('Observed y')
        plt.ylabel('Predicted y')
        plt.title('Predicted vs Ground Truth')
        plt.legend(loc='upper left', numpoints=1)
        ax = plt.axes([0.63, 0.15, 0.25, 0.25], frameon=True)
        ax.hist(self.residual,
                alpha=0.5,
                bins=16,
                color='b',
                stacked=True,
                density=True)
        ax.hist(self.residual_test,
                alpha=0.5,
                bins=16,
                color='r',
                stacked=True,
                density=True)
        #ax.xaxis.set_ticks(np.arange(-2, 2, 1))
        ax.set_title('Residual')
        plt.draw()
        plt.savefig(self.outmcmc + 'Data_vs_Model_1d.png')
        # plot more histograms:
        # plt.clf()
        # plt.hist(self.residual_i, 30, facecolor='blue', alpha=0.5)
        # plt.title('Sum Residual y-model')
        # plt.ylabel('N Samples')
        # plt.draw()
        # plt.savefig(outdir + 'hist_residual.png')
        plt.clf()
        plt.plot(self.y, self.mu_blr, 'o')
        plt.xlabel('Actual ' + target_desc)
        plt.ylabel('Predicted ' + target_desc)
        plt.title('Predicted from Demographics vs Ground Truth')
        plt.draw()
        plt.savefig(self.outmcmc + 'Data_ModelBLR_1d.png')
        plt.clf()
        plt.plot(self.y_model, self.residual, 'o')
        plt.plot([min(self.y_model), max(self.y_model)], [0, 0], 'k--')
        plt.xlabel('Predicted ' + target_desc)
        plt.ylabel('Residual')
        plt.draw()
        plt.savefig(self.outmcmc + 'Model_Residual_1d.png')
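The predicted-vs-observed figure above embeds a residual histogram with plt.axes. A minimal standalone sketch of that inset idiom on synthetic data, using density=True (the current Matplotlib spelling of the old normed flag):

import numpy as np
import matplotlib.pyplot as plt

y_true = np.random.randn(200)
y_pred = y_true + 0.3 * np.random.randn(200)

plt.plot(y_true, y_pred, 'o', alpha=0.5)
plt.xlabel('Observed y')
plt.ylabel('Predicted y')

ax = plt.axes([0.63, 0.15, 0.25, 0.25])  # inset: [left, bottom, width, height] in figure coords
ax.hist(y_true - y_pred, bins=16, density=True, alpha=0.5)
ax.set_title('Residual')
plt.savefig('inset_example.png')
plt.close()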
    def viz_options2(self, sess, coord, saver):
        with sess.as_default(), sess.graph.as_default():
            self.sess = sess
            self.saver = saver
            folder_path = os.path.join(
                os.path.join(self.config.stage_logdir, "summaries"),
                "policies")
            tf.gfile.MakeDirs(folder_path)
            matrix_path = os.path.join(
                os.path.join(self.config.stage_logdir, "models"), "matrix.npy")
            self.matrix_sf = np.load(matrix_path)
            u, s, v = np.linalg.svd(self.matrix_sf)
            eigenvalues = s[1:1 + self.nb_options]
            eigenvectors = v[1:1 + self.nb_options]

            for option in range(len(eigenvalues)):
                prefix = str(option) + '_'
                plt.clf()

                with sess.as_default(), sess.graph.as_default():
                    for idx in range(self.nb_states):
                        dx = 0
                        dy = 0
                        d = False
                        s, i, j = self.env.get_state(idx)
                        if not self.env.not_wall(i, j):
                            plt.gca().add_patch(
                                patches.Rectangle(
                                    (j, self.config.input_size[0] - i -
                                     1),  # (x,y)
                                    1.0,  # width
                                    1.0,  # height
                                    facecolor="gray"))
                            continue

                        feed_dict = {
                            self.local_network.observation: np.stack([s])
                        }
                        fi, options, o_term = sess.run([
                            self.local_network.fi, self.local_network.options,
                            self.local_network.termination
                        ],
                                                       feed_dict=feed_dict)
                        fi, options, o_term = fi[0], options[0], o_term[0]
                        pi = options[option]
                        # Draw the action index directly; recovering it via
                        # argmax(pi == action) misfires when probabilities tie.
                        a = np.random.choice(len(pi), p=pi)
                        o_term = o_term[option] > np.random.uniform()
                        if a == 0:  # up
                            dy = 0.35
                        elif a == 1:  # right
                            dx = 0.35
                        elif a == 2:  # down
                            dy = -0.35
                        elif a == 3:  # left
                            dx = -0.35

                        if o_term:  # termination
                            circle = plt.Circle(
                                (j + 0.5,
                                 self.config.input_size[0] - i + 0.5 - 1),
                                0.025,
                                color='k')
                            plt.gca().add_artist(circle)
                            continue

                        plt.arrow(j + 0.5,
                                  self.config.input_size[0] - i + 0.5 - 1,
                                  dx,
                                  dy,
                                  head_width=0.05,
                                  head_length=0.05,
                                  fc='k',
                                  ec='k')

                    plt.xlim([0, self.config.input_size[1]])
                    plt.ylim([0, self.config.input_size[0]])

                    for i in range(self.config.input_size[1]):
                        plt.axvline(i, color='k', linestyle=':')
                    plt.axvline(self.config.input_size[1],
                                color='k',
                                linestyle=':')

                    for j in range(self.config.input_size[0]):
                        plt.axhline(j, color='k', linestyle=':')
                    plt.axhline(self.config.input_size[0],
                                color='k',
                                linestyle=':')

                    plt.savefig(
                        os.path.join(self.summary_path,
                                     "Option_" + prefix + 'policy.png'))
                    plt.close()
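Both viz_options variants sample an action index from a per-option policy vector pi. Drawing the index directly, as below, is the robust idiom: recovering the index by value matching (an argmax over pi == action) misfires whenever two actions share the same probability:

import numpy as np

pi = np.array([0.25, 0.25, 0.25, 0.25])  # ties everywhere
a = np.random.choice(len(pi), p=pi)      # each index a is drawn with probability pi[a]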