# Example #1
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pylab
import D14Cpreprocess as prep
import C14tools

#%% fill Layer_top, Layer_bottom using Layer_top_norm and Layer_bottom_norm
filename = 'Non_peat_data_synthesis.csv'
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1]) 
newdata = prep.get_originalLayerdepth(data)
newdata.to_csv('Non_peat_data_synthesis2.csv',encoding='iso-8859-1')
#%% plot 14C and SOC profile at each site
filename = 'Non_peat_data_synthesis.csv'
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1]) 
pid = data[data['Start_of_Profile']==1].index # index of profile start
d14C = prep.getvarxls(data,'D14C_BulkLayer', pid, ':')
sampleyr = prep.getvarxls(data, 'SampleYear', pid, ':')
tau, cost = C14tools.cal_tau(d14C, sampleyr)
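# cal_tau is assumed to return an array of fitted turnover times per layer
# (best-fit value in column 0, hence tau[:,0] below) together with the fit cost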
data['tau'] = pd.Series(tau[:,0], index=data.index)

#%% plot individual profile
fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(16,10))
for fign in range(15):
    i = fign + 135
    ax1 = fig.axes[fign]
    ax1.invert_yaxis()  # depth increases downward
    ax2 = ax1.twiny()
    Y = data.loc[pid[i]:pid[i+1]-1,['Layer_bottom']]
    X1 = data.loc[pid[i]:pid[i+1]-1,['D14C_BulkLayer']]
    # total SOC
    X2 = np.array(data.loc[pid[i]:pid[i+1]-1,['BulkDensity']]).astype(float)
    # (the factor multiplying bulk density is truncated in the source)
#%% list profiles selected for modeling
filename = 'Non_peat_data_synthesis.csv'
data = pd.read_csv(filename,encoding='iso-8859-1', skiprows=[1])  
df = data[data['ProfileID'].isin(mdlprofid)]  # mdlprofid: modeled-profile IDs, defined in a later snippet from sitegridid2.txt
df = df[['ProfileID','Author','Site','Lon','Lat','Layer_bottom','SampleYear','reference','title']]
aa = df.groupby('ProfileID').last()
# attach Cave14C
dum = np.loadtxt('tot48prof.txt', delimiter=',')
ave14C = pd.Series(dum[:,5], index=dum[:,2])  # per the note above: profile-average D14C (col 5) keyed by ProfileID (col 2)
aa['ave14C'] = ave14C.reindex(aa.index)
aa.to_csv('./prof4modelinglist.csv')

# median sample year
filename = 'Non_peat_data_synthesis.csv'
data = pd.read_csv(filename,encoding='iso-8859-1', skiprows=[1])  
profid = data[data['Start_of_Profile']==1].index # index of profile start
sampleyr = prep.getvarxls(data, 'SampleYear', profid, 0)
print('median sample year is: ', np.median(sampleyr))
print('mean sample year is: ', np.nanmean(sampleyr))
#%% extract HWSD SOC for the 48 profiles; this file needs to be updated whenever the profile selection changes
sawtcfn = 'AncillaryData\\HWSD\\Regridded_ORNLDAAC\\AWT_S_SOC.nc4'
tawtcfn = 'AncillaryData\\HWSD\\Regridded_ORNLDAAC\\AWT_T_SOC.nc4'
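# AWT_T_SOC / AWT_S_SOC hold area-weighted topsoil and subsoil SOC from the
# regridded HWSD (ORNL DAAC); their sum below gives total profile SOC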
totprof = np.loadtxt('tot48prof.txt', delimiter=',')
sawtc = prep.getHWSD(sawtcfn, totprof[:,0], totprof[:,1])
tawtc = prep.getHWSD(tawtcfn, totprof[:,0], totprof[:,1])
hwsdsoc = sawtc + tawtc
with open('hwsd48profsoc.txt', 'w') as outf:
    for item in hwsdsoc:
        outf.write("%.2f\n" % item)
#%% --------- plot histogram of globally modeled D14C vs. the synthesized data
pathh = "C:\\download\\work\\!manuscripts\\14Cboxmodel\\CMIP5_dataAnalysis\\"  # remainder of this path is truncated in the source
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import D14Cpreprocess as prep
from mpl_toolkits.basemap import Basemap, cm
import mynetCDF as mync
from netCDF4 import Dataset
import myplot as myplt
import mystats as mysm

#%% plot all profiles
filename = 'Non_peat_data_synthesis.csv'
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])  
profid = data.index.unique()
lons = prep.getvarxls(data,'Lon',profid,0)
lats = prep.getvarxls(data,'Lat',profid,0)
fig = plt.figure()
ax = fig.add_axes([0.05,0.05,0.9,0.9])
m = Basemap(llcrnrlon=-180,llcrnrlat=-60,urcrnrlon=180,urcrnrlat=80,projection='mill',lon_0=0,lat_0=0)
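# global map in Miller projection; corners span 60S-80N, 180W-180E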
x, y = m(lons, lats)  # project to map coordinates (not needed below, since scatter uses latlon=True)
m.drawcoastlines(linewidth=0.25)
m.drawcountries(linewidth=0.25)
m.drawmapboundary(fill_color='#99ffff')
m.fillcontinents(color='grey',lake_color='#99ffff',zorder=0)
m.scatter(lons,lats,15,marker='^',color='r',alpha=0.7,latlon=True)
# draw parallels.
parallels = np.arange(-90.,90.,30.)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
# draw meridians (filled in by analogy with the parallels above; the original line was truncated)
meridians = np.arange(-180., 180., 60.)
m.drawmeridians(meridians, labels=[0,0,0,1], fontsize=10)
# Example #4
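# (snippet begins mid-function: a grid search over candidate slow-pool turnover
# times, keeping the tau that minimizes the squared misfit to the observed D14C)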
            for n, tau in enumerate(tauslow):
                #print 'tauslow: %d, #%d'%(tau, n)
                dum = obs - FMtoD14C(cal_prebomb_FM(1./tau))
                cost.append(dum**2)
            besttau.append(tauslow[np.argmin(np.asarray(cost))])
    return besttau
    
#%% test cal_tau
import D14Cpreprocess as prep
import pandas as pd
import C14tools
filename = 'Non_peat_data_synthesis.csv'
Cave14C = prep.getCweightedD14C2(filename)
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])  
profid = Cave14C[:,3]
d14C = prep.getvarxls(data,'D14C_BulkLayer', profid, ':')
sampleyr = prep.getvarxls(data, 'SampleYear', profid, ':')

n0 = 40
nend = 60
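# %timeit is an IPython line magic, so this cell must run in an IPython/Spyder console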
%timeit tau, cost = C14tools.cal_tau(d14C[n0:nend], sampleyr[n0:nend])

#%%
import numba as nb
#@nb.jit(nb.f8(nb.f8[:]))
#@nb.autojit
def summ(arr):
    summ = 0.
    for i in arr:
        summ = summ + i
    return summ
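# quick sanity check (example values): summ([1., 2., 3., 4.]) returns 10.0;
# enabling one of the numba decorators above compiles the loop for timing comparisons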
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import D14Cpreprocess as prep
import scipy.stats as stats
import statsmodels.api as sm
import mystats as mysm
import myplot
import pylab
import random
from collections import Counter
#%% prepare data
# get 14C and SOC of total profiles
filename = 'Non_peat_data_synthesis.csv'
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])  
layerbot = prep.getvarxls(data, 'Layer_bottom_norm', data.index.unique(), -1)
plt.hist(layerbot, 60)
cutdep = 40.
Cave14C = prep.getCweightedD14C2(filename, cutdep=cutdep)
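# Cave14C columns as used below (inferred from this script): [:,1] layer top,
# [:,2] layer bottom (cm), [:,3] ProfileID, [:,4] C-weighted D14C, [:,5] SOC stock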
nprof = Cave14C[(Cave14C[:,1]==0) & (Cave14C[:,2]==cutdep)].shape[0] # n = 138
totprofid = Cave14C[(Cave14C[:,1]==0) & (Cave14C[:,2]==cutdep), 3]
totprof14C = Cave14C[(Cave14C[:,1]==0) & (Cave14C[:,2]==cutdep), 4]
totprofSOC = Cave14C[(Cave14C[:,1]==0) & (Cave14C[:,2]==cutdep), 5]
totprofveg = prep.getvarxls(data,'VegTypeCode_Local',totprofid,0)
print(Counter(list(totprofveg)))  # tally of profiles by vegetation type

# get 14C and SOC of profiles selected for modeling
pltorisitesonly = 0
filename = 'sitegridid2.txt'
data = np.loadtxt(filename, delimiter=',')
mdlprofid = data[:,2].astype(float)
# Example #6
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import D14Cpreprocess as prep
import scipy.stats as stats
from sklearn import linear_model
import statsmodels.api as sm
import mystats as mysm
import myplot
import scipy.io
import C14tools
#%% linear regression of Cave14C on MAT and MAP
filename = 'Non_peat_data_synthesis.csv'
Cave14C = prep.getCweightedD14C2(filename,cutdep=40.)
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])  
mat = prep.getvarxls(data,'MAT',Cave14C[~np.isnan(Cave14C[:,4]),3],0)
mapp = prep.getvarxls(data,'MAP',Cave14C[~np.isnan(Cave14C[:,4]),3],0)

x = np.c_[mat.astype(float),mapp.astype(float)]
y = Cave14C[~np.isnan(Cave14C[:,4]),4]
notNaNs = ~np.any(np.isnan(x),1) & ~np.isnan(y)
#stats.linregress(x[notNaNs,:],y[notNaNs]) # cannot do multiple-linear reg
#
#clf = linear_model.LinearRegression() # no statistics!!
#clf.fit(x[notNaNs],y[notNaNs])

X = x[notNaNs,:]
y = y[notNaNs]
X = sm.add_constant(X)  # prepend intercept column
ols = sm.OLS(y, X).fit()
print(ols.summary())
# Example #7
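# assumed imports for this snippet (not shown in the source)
import numpy as np
import pandas as pd
import D14Cpreprocess as prep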
jobgydepth = [20, 40, 60, 80, 100]
csvbiome = {1:[50, 25, 13, 7, 5],
            2:[50, 22, 13, 8, 7],
            3:[38, 22, 17, 13, 10],
            4:[41, 23, 15, 12, 9],
            5:[41, 23, 15, 12, 9],
            6:[39, 22, 16, 13, 10],
            7:[46, 46, 46, 46, 46],
            8:[36, 23, 18, 13, 10]} # biome code in my xlsx; pctC at each depth from Jobbágy

cutdep = 100.
filename = 'Non_peat_data_synthesis.csv'
Cave14C = prep.getCweightedD14C2(filename, cutdep=cutdep)
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])
tot1mprofid = Cave14C[np.logical_and(Cave14C[:,1]==0.,Cave14C[:,2]==100.),3]
tot1mprofidlon = prep.getvarxls(data, 'Lon', tot1mprofid, 0)
tot1mprofidlat = prep.getvarxls(data, 'Lat', tot1mprofid, 0)
sitefilename = 'sitegridid2.txt'
dum = np.loadtxt(sitefilename, delimiter=',')
profid4modeling = dum[:,2]
extraprofid = list(set(tot1mprofid) - set(profid4modeling))
sawtcfn = '..\\AncillaryData\\HWSD\\Regridded_ORNLDAAC\\AWT_S_SOC.nc4'
tawtcfn = '..\\AncillaryData\\HWSD\\Regridded_ORNLDAAC\\AWT_T_SOC.nc4'
sawtc = prep.getHWSD(sawtcfn, tot1mprofidlon, tot1mprofidlat)
tawtc = prep.getHWSD(tawtcfn, tot1mprofidlon, tot1mprofidlat)
hwsdsoc = sawtc + tawtc

#%% compare Jobbágy SOC vs. observed SOC, linear interpolation, using pctC
out = []
obss = []
depthh = []
# Example #8
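# (snippet begins mid-function: converts observed turnover times to D14C, using
# the bomb-period difference equation when atmospheric FM applies, otherwise the
# pre-bomb steady-state formula)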
            # use difference equation
            outD14C += FMtoD14C(cal_difference_FM(1./obs, atmFM, sampleyr, 30000)),
        else:
            # use prebomb_FM
            outD14C += FMtoD14C(cal_prebomb_FM(1./obs)),
    return outD14C

def C14agetoD14C(age):
    ''' Convert a conventional radiocarbon age to D14C (permil),
        with the standard corrected to 1950.
    '''
    return (np.exp(-age/8033.) - 1.) * 1000.  # 8033 yr = Libby mean life
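# sanity check (example values): C14agetoD14C(0.) -> 0.0, and one Libby mean life
# C14agetoD14C(8033.) -> (exp(-1) - 1) * 1000 ~= -632.1 permil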
    
    
#%% 
if __name__ == "__main__":
    import D14Cpreprocess as prep
    import pandas as pd
    
    filename = 'Non_peat_data_synthesis.csv'
    Cave14C = prep.getCweightedD14C2(filename)
    data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])  
    profid = Cave14C[:,3]
    d14C = prep.getvarxls(data,'D14C_BulkLayer', profid, ':')
    sampleyr = prep.getvarxls(data, 'SampleYear', profid, ':')
    
    print('Run cal_tau to get turnover time for each soil layer...')
    n0 = 40
    nend = 60
    #tau, cost = cal_tau(d14C[n0:nend], sampleyr[n0:nend])
    tau, cost = cal_tau(d14C, sampleyr)
    print('tau and cost hold the calculated turnover times and fit costs')