import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import D14Cpreprocess as prep
from mpl_toolkits.basemap import Basemap, cm
import mynetCDF as mync
from netCDF4 import Dataset
import myplot as myplt
import mystats as mysm

#%% plot all profiles
# Scatter every profile location on a Miller-projection world map.
filename = 'Non_peat_data_synthesis.csv'
# skiprows=[1] skips the units row under the header; ProfileID indexes profiles
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])  
profid = data.index.unique()
lons = prep.getvarxls(data,'Lon',profid,0)
lats = prep.getvarxls(data,'Lat',profid,0)
fig = plt.figure()
ax = fig.add_axes([0.05,0.05,0.9,0.9])
m = Basemap(llcrnrlon=-180,llcrnrlat=-60,urcrnrlon=180,urcrnrlat=80,projection='mill',lon_0=0,lat_0=0)
m.drawcoastlines(linewidth=0.25)
m.drawcountries(linewidth=0.25)
m.drawmapboundary(fill_color='#99ffff')
m.fillcontinents(color='grey',lake_color='#99ffff',zorder=0)
# latlon=True makes Basemap project lon/lat internally, so the previous
# np.meshgrid(lons, lats) and x, y = m(lons, lats) computations were dead
# code and have been removed.
m.scatter(lons,lats,15,marker='^',color='r',alpha=0.7,latlon=True)
# draw parallels.
parallels = np.arange(-90.,90.,30.)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
# draw meridians
# =============================================================================
# Exemple #2  (extraction artifact: header of the next stitched snippet)
# @author: Yujie
# =============================================================================

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import D14Cpreprocess as prep
import pylab
import C14tools
import matplotlib

#%% fill Layer_top, Layer_bottom using Layer_top_norm Layer_bottom_norm
# Restore original (non-normalized) layer depths and write a second synthesis csv.
filename = 'Non_peat_data_synthesis.csv'
# skiprows=[1] skips the units row under the header; ProfileID indexes profiles
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1]) 
newdata = prep.get_originalLayerdepth(data)
newdata.to_csv('Non_peat_data_synthesis2.csv',encoding='iso-8859-1')
#%% plot 14C and SOC profile at each site
filename = 'Non_peat_data_synthesis.csv'
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1]) 
pid = data[data['Start_of_Profile']==1].index # index of profile start
# per-profile bulk-layer Delta14C and sampling year (':' selects all layers)
d14C = prep.getvarxls(data,'D14C_BulkLayer', pid, ':')
sampleyr = prep.getvarxls(data, 'SampleYear', pid, ':')
# fit turnover time tau per layer; cost is the fit residual
# NOTE(review): assumes cal_tau returns a 2-D array with tau in column 0 -- confirm
tau, cost = C14tools.cal_tau(d14C, sampleyr)
data['tau'] = pd.Series(tau[:,0], index=data.index)

#%% plot individual profile
# 4x4 grid of subplots for 15 profiles; NOTE(review): this cell is truncated
# at a splice point -- the loop body continues in the original file.
fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(16,10))
for fign in range(15):
    i = fign + 135   # profile IDs 135-149 in this batch
    ax1 = fig.axes[fign]
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 12 21:43:00 2015

@author: Yujie
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import D14Cpreprocess as prep
import mynetCDF as mync
from netCDF4 import Dataset
#%% ---------  check if data are utf-8  --------------------------
filename = 'Non_peat_data_synthesis.csv'
# scan the csv for encoding/formatting problems (see D14Cpreprocess.sweepdata)
prep.sweepdata(filename)

#%% -------------  run get profile for 14C modeling  --------------
modeldim = (96.0, 96.0) # lat and lon dimension of the ESM
# extract profiles reaching cutdep and map them onto the ESM grid
prep.getprofile4modeling(filename,*modeldim,cutdep=100.0,outfilename='sitegridid2.txt')

dum = np.loadtxt('sitegridid.txt',delimiter=',')
prep.plotDepth_cumC(filename,dum[:,2])   # column 2 holds profile IDs

# write information of profiles used in modeling to csv. For table in paper
pltorisitesonly = 0   # 0: also append sites from the extra-sites file
filename = 'sitegridid2.txt'
data = np.loadtxt(filename,unpack=True,delimiter=',')[:,:].T
mdlprofid = data[:,2].astype(float)
if pltorisitesonly == 0: # including extra sites
    filename = 'extrasitegridid.txt'
    data = np.loadtxt(filename,unpack=True,delimiter=',')[:,:].T
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import D14Cpreprocess as prep
import pylab

# Plot cumulative-SOC fraction vs. C-weighted running-mean Delta14C for the
# 48 modeled profiles.
filename = 'tot48prof.txt'
dum = np.loadtxt(filename, delimiter=',')
profid = dum[:,2]   # column 2 holds profile IDs
#%% plot cumuSOC vs cumuC-averaged D14C
cm = plt.get_cmap('gist_rainbow')
numcolr = len(profid) # no repeat in color
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8,6))
# set_color_cycle was removed in matplotlib 2.0; set_prop_cycle is the
# supported replacement and yields the same per-line color cycling.
axes.set_prop_cycle(color=[cm(1.*jj/numcolr) for jj in range(numcolr)])
for i in profid:
    # interpolated profile columns: [:,1] D14C, [:,2] SOC, cut at 100 cm
    data = prep.getprofSOCD14C_interp('Non_peat_data_synthesis.csv',i,cutdep=100.)
    cumuC = np.cumsum(data[:,2])
    cumufracC = cumuC/cumuC[-1]   # cumulative SOC fraction, 0-1
    cumuD14C = np.cumsum(data[:,1] * data[:,2])/cumuC   # C-weighted running mean
    axes.plot(cumufracC, cumuD14C, ':', lw=2)
# x data is a 0-1 fraction, so drop the misleading "(%)" from the old label
axes.set_xlabel('Cumulative SOC fraction')
axes.set_ylabel(r"Cumulative $\Delta^{14}C$ ("+ u"\u2030)")

#%% plot cumuSOC vs  D14C
cm = plt.get_cmap('gist_rainbow')
numcolr = len(profid) # no repeat in color
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8,6))
# NOTE(review): set_color_cycle was removed in matplotlib 2.0 (replacement is
# set_prop_cycle) -- this cell only runs on old matplotlib versions.
axes.set_color_cycle([cm(1.*jj/numcolr) for jj in range(numcolr)])
# NOTE(review): cell truncated at a splice point; loop body continues in the
# original file.
for i in profid:
    if i==144.:   # profile 144 is excluded from this plot
        continue
# =============================================================================
# Exemple #5  (extraction artifact: header of the next stitched snippet)
# =============================================================================
            # (fragment: the enclosing function and loop begin before this
            # excerpt -- obs, cost, taufast, tauslow are defined above)
            besttau.append(taufast[np.argmin(np.asarray(cost))])
        else:
            # use prebomb_FM
            for n, tau in enumerate(tauslow):
                #print 'tauslow: %d, #%d'%(tau, n)
                # squared error between observed D14C and the steady-state
                # D14C implied by turnover time tau
                dum = obs - FMtoD14C(cal_prebomb_FM(1./tau))
                cost.append(dum**2)
            # keep the tau minimizing the squared error for this observation
            besttau.append(tauslow[np.argmin(np.asarray(cost))])
    return besttau
    
    
#%% test cal_tau
import D14Cpreprocess as prep
import pandas as pd
import C14tools
filename = 'Non_peat_data_synthesis.csv'
# C-weighted mean D14C per profile; column 3 holds the profile ID
Cave14C = prep.getCweightedD14C2(filename)
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])  
profid = Cave14C[:,3]
d14C = prep.getvarxls(data,'D14C_BulkLayer', profid, ':')
sampleyr = prep.getvarxls(data, 'SampleYear', profid, ':')

# benchmark tau fitting on a 20-profile slice
n0 = 40
nend = 60
# NOTE: %timeit is IPython magic -- this cell only runs under IPython/Spyder
%timeit tau, cost = C14tools.cal_tau(d14C[n0:nend], sampleyr[n0:nend])

#%%
import numba as nb
# numba-acceleration experiment; both decorators are left disabled
#@nb.jit(nb.f8(nb.f8[:]))
#@nb.autojit
def summ(arr):
    # (fragment: function body is truncated at a splice point in this excerpt)
    summ = 0.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import D14Cpreprocess as prep
import scipy.stats as stats
import statsmodels.api as sm
import mystats as mysm
import myplot
import pylab
import random
from collections import Counter
#%% prepare data
# get 14C and SOC of total profiles
filename = 'Non_peat_data_synthesis.csv'
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])  
# deepest normalized layer bottom per profile (-1 = last layer of each profile)
layerbot = prep.getvarxls(data, 'Layer_bottom_norm', data.index.unique(), -1)
plt.hist(layerbot, 60)
cutdep = 40.
Cave14C = prep.getCweightedD14C2(filename, cutdep=cutdep)
# keep only profiles that start at 0 cm (col 1) and reach exactly cutdep (col 2)
nprof = Cave14C[(Cave14C[:,1]==0) & (Cave14C[:,2]==cutdep)].shape[0] # n = 138
totprofid = Cave14C[(Cave14C[:,1]==0) & (Cave14C[:,2]==cutdep), 3]
totprof14C = Cave14C[(Cave14C[:,1]==0) & (Cave14C[:,2]==cutdep), 4]
totprofSOC = Cave14C[(Cave14C[:,1]==0) & (Cave14C[:,2]==cutdep), 5]
totprofveg = prep.getvarxls(data,'VegTypeCode_Local',totprofid,0)
# tally vegetation types (interactive inspection; the Counter is not stored)
dum = list(totprofveg); Counter(dum)

# get 14C and SOC of profiles selected for modeling
pltorisitesonly = 0
filename = 'sitegridid2.txt'
data = np.loadtxt(filename,unpack=True,delimiter=',')[:,:].T
mdlprofid = data[:,2].astype(float)
# =============================================================================
# Exemple #7  (extraction artifact: header of the next stitched snippet)
# @author: Yujie
# =============================================================================
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import D14Cpreprocess as prep
import scipy.stats as stats
from sklearn import linear_model
import statsmodels.api as sm
import mystats as mysm
import myplot
import scipy.io
import C14tools
#%% linear regression of Cave14C. MAT, MAP
filename = 'Non_peat_data_synthesis.csv'
Cave14C = prep.getCweightedD14C2(filename,cutdep=40.)
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])  
# MAT/MAP of profiles whose 40-cm mean D14C (col 4) exists; col 3 = profile ID
mat = prep.getvarxls(data,'MAT',Cave14C[~np.isnan(Cave14C[:,4]),3],0)
mapp = prep.getvarxls(data,'MAP',Cave14C[~np.isnan(Cave14C[:,4]),3],0)

x = np.c_[mat.astype(float),mapp.astype(float)]
y = Cave14C[~np.isnan(Cave14C[:,4]),4]
# rows where both predictors and the response are finite
notNaNs = ~np.any(np.isnan(x),1) & ~np.isnan(y)
#stats.linregress(x[notNaNs,:],y[notNaNs]) # cannot do multiple-linear reg
#
#clf = linear_model.LinearRegression() # no statistics!!
#clf.fit(x[notNaNs],y[notNaNs])

# OLS via statsmodels (provides full regression statistics)
X = x[notNaNs,:]
y = y[notNaNs]
X= sm.add_constant(X)
import pylab
from matplotlib.ticker import FuncFormatter

# All profiles
# Load observed profile SOC/D14C (from the site-grid file or recomputed from
# the synthesis csv) and the HWSD topsoil+subsoil SOC grids for comparison.
fromsitegrididfile = 1
if fromsitegrididfile == 1:
    # sitegridid2.txt columns: 0 lon, 1 lat, ..., 5 D14C, 6 total SOC
    filename = 'sitegridid2.txt'
    dum = np.loadtxt(filename, delimiter=',')
    proftotSOC = dum[:,6]
    profD14C = dum[:,5]
    proflon = dum[:,0]
    proflat = dum[:,1]
else:
    filename = 'Non_peat_data_synthesis.csv'
    cutdep = 100.
    profdata = prep.getCweightedD14C2(filename,cutdep=cutdep)
    proftotSOC = profdata[profdata[:,2]==cutdep,5]
    profD14C = profdata[profdata[:,2]==cutdep,4]
# HWSD: area-weighted subsoil (S) and topsoil (T) SOC, regridded ORNL DAAC files
sawtcfn = '..\\AncillaryData\\HWSD\\Regridded_ORNLDAAC\\AWT_S_SOC.nc4'
tawtcfn = '..\\AncillaryData\\HWSD\\Regridded_ORNLDAAC\\AWT_T_SOC.nc4'
ncfid = Dataset(sawtcfn, 'r')
nc_attrs, nc_dims, nc_vars = mync.ncdump(ncfid)
sawtc = ncfid.variables['SUM_s_c_1'][:]
ncfid.close()  # was leaked: handle re-bound below without being closed
ncfid = Dataset(tawtcfn, 'r')
nc_attrs, nc_dims, nc_vars = mync.ncdump(ncfid)
hwsdlat = ncfid.variables['lat'][:]
hwsdlon = ncfid.variables['lon'][:]
tawtc = ncfid.variables['SUM_t_c_12'][:]
ncfid.close()  # was leaked: never closed after reading
hwsdsoc = sawtc + tawtc
hwsdsoc = np.ravel(hwsdsoc)
# =============================================================================
# Exemple #9  (extraction artifact: header of the next stitched snippet)
# =============================================================================
from scipy.interpolate import interp1d
import mystats as myst

# Cumulative %C distribution by depth interval for each biome (Jobbagy-style)
jobgydepth = [20, 40, 60, 80, 100]   # interval bottoms in cm
csvbiome = {1:[50, 25, 13, 7, 5],
            2:[50, 22, 13, 8, 7],
            3:[38, 22, 17, 13, 10],
            4:[41, 23, 15, 12, 9],
            5:[41, 23, 15,12, 9],
            6:[39, 22, 16, 13, 10],
            7:[46, 46, 46, 46, 46],
            8:[36, 23, 18, 13, 10]} # biome code in my xlsx. pctC from jobaggy

cutdep = 100.
filename = 'Non_peat_data_synthesis.csv'
Cave14C = prep.getCweightedD14C2(filename, cutdep=cutdep)
data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID', skiprows=[1])
# profiles that start at 0 cm (col 1) and reach a full 1 m (col 2); col 3 = ID
tot1mprofid = Cave14C[np.logical_and(Cave14C[:,1]==0.,Cave14C[:,2]==100.),3]
tot1mprofidlon = prep.getvarxls(data, 'Lon', tot1mprofid, 0)
tot1mprofidlat = prep.getvarxls(data, 'Lat', tot1mprofid, 0)
sitefilename = 'sitegridid2.txt'
dum = np.loadtxt(sitefilename, delimiter=',')
profid4modeling = dum[:,2]
# 1-m profiles not already used for modeling
extraprofid = list(set(tot1mprofid) - set(profid4modeling))
# HWSD subsoil + topsoil SOC sampled at each profile's coordinates
sawtcfn = '..\\AncillaryData\\HWSD\\Regridded_ORNLDAAC\\AWT_S_SOC.nc4'
tawtcfn = '..\\AncillaryData\\HWSD\\Regridded_ORNLDAAC\\AWT_T_SOC.nc4'
sawtc = prep.getHWSD(sawtcfn, tot1mprofidlon, tot1mprofidlat)
tawtc = prep.getHWSD(tawtcfn, tot1mprofidlon, tot1mprofidlat)
hwsdsoc = sawtc + tawtc

#%% compare jobaggy soc vs. obs soc, linear interpolation, using pctC