Example #1
import tokunaga_fns as toku
from glob import glob
import numpy as np

# Build per-climate-zone lists of filenames; the numeric prefixes are
# Köppen-Geiger zone IDs. The arid list was missing from this snippet,
# so the pattern below is an assumption based on the other zones.
arid = glob('../data/TokunagaData_[4-7]_*.csv')
tropical = glob('../data/TokunagaData_[1-3]_*.csv')
temperate = glob('../data/TokunagaData_8_*.csv')
temperate += glob('../data/TokunagaData_11_*.csv')
temperate += glob('../data/TokunagaData_14_*.csv')
cold = glob('../data/TokunagaData_17_*.csv')
cold += glob('../data/TokunagaData_21_*.csv')
cold += glob('../data/TokunagaData_25_*.csv')

labels = ['Arid', 'Tropical', 'Temperate', 'Cold']
All_Data = []

for i, data in enumerate([arid, tropical, temperate, cold]):
    Ds = []
    for filename in data:

        _, strahler_data, lengths = toku.read_toku_data(filename)

        Rr_mean, Rr_std = toku.calc_Rr(strahler_data, lengths)
        Rb_mean, Rb_std = toku.calc_Rb(strahler_data)

        # Skip basins where Rr is close to 1: ln(Rr) -> 0 would make the
        # fractal-dimension estimate blow up
        if Rr_mean < 1.05:
            continue

        D = toku.D(Rb_mean, Rr_mean)
        Ds.append(D)
        All_Data.append(D)

    print('{} Mean: {} StdDev: {}'.format(labels[i], np.mean(Ds), np.std(Ds)))
print('All Data Mean: {} StdDev: {}'.format(np.mean(All_Data),
                                            np.std(All_Data)))
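
# For reference, a minimal sketch of the quantity toku.D is assumed to
# compute above: the Horton-ratio estimate of a network's fractal dimension,
# D = ln(Rb) / ln(Rr). The actual tokunaga_fns implementation may differ.
def fractal_dimension_sketch(Rb_mean, Rr_mean):
    # Diverges as Rr -> 1, which is why the loop above skips basins with
    # Rr_mean < 1.05
    return np.log(Rb_mean) / np.log(Rr_mean)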
Example #2
import tokunaga_fns as toku
from glob import glob
import json
import sys

# Load our data into a list of filenames, categorized by climate zone
data = glob('/data/Geog-c2s2/toku/*/TokunagaData_*.csv')

# Make sure no weird filesystem ordering messes up the file list we are slicing
data.sort()

data_dict = {}

# Slice bounds from the command line, so the file list can be processed
# in batches across separate jobs
start = int(sys.argv[1])
end = int(sys.argv[2])

for filename in data[start:end]:
    # The ID is everything between 'TokunagaData_' and the '.csv' extension
    toku_id = filename.split('TokunagaData_')[1][:-4]
    toku_data, strahler_data, _ = toku.read_toku_data(filename)

    r_sq, a, c = toku.fit_a_and_c(toku_data, strahler_data)

    data_dict[toku_id] = [r_sq, a, c]

with open('/data/Geog-c2s2/a_c_data/a_c_data_{}_{}.json'.format(start, end),
          'w') as fp:
    json.dump(data_dict, fp)

print('Done')
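
# Each run of the script above writes one JSON slice. A sketch of how the
# slices could be merged back into a single dict afterwards (assumption:
# this merge step is not shown in the original):
import json
from glob import glob

merged = {}
for part in sorted(glob('/data/Geog-c2s2/a_c_data/a_c_data_*.json')):
    with open(part) as fp:
        merged.update(json.load(fp))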
Example #3
import json
import sys

import numpy as np
import matplotlib.pyplot as plt

from tokunaga_fns import read_toku_data

# The opening lines of this example were cut off; the header, imports, and
# the first `with open(...)` statement are reconstructed here. The basin-data
# filename is an assumption modeled on the ac_data path below.
with open('../data/basin_data.json') as js:
    basin_data = json.load(js)

with open('../data/ac_data.json') as js:
    ac_data = json.load(js)

# Minimum r-squared for the Tokunaga a/c fit, passed on the command line
threshold = float(sys.argv[1])


c_vals = []
chan_density = []

for toku_id, values in ac_data.items():
    # values holds [r_sq, a, c] (see Example #2); keep only well-fit basins
    # that also have basin geometry data
    if values[0] > threshold and toku_id in basin_data:
        basin = basin_data[toku_id]

        c_vals.append(values[2])  # the Tokunaga c parameter

        _, _, lengths = read_toku_data('../data/TokunagaData_{}.csv'.format(toku_id))

        total_length = np.sum(lengths)

        area = basin[3]
        unchan_area = basin[0]

        chan_density.append(total_length / (area - unchan_area))


plt.hist2d(c_vals, np.log10(chan_density), bins=(50, 50), cmap=plt.cm.viridis, density=False)
plt.show()
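
# A sketch of the same plot with axis labels and a colorbar added (the label
# text is an assumption based on the variables being plotted):
plt.hist2d(c_vals, np.log10(chan_density), bins=(50, 50), cmap=plt.cm.viridis)
plt.xlabel('Tokunaga c')
plt.ylabel('log10(channel density)')
plt.colorbar(label='count')
plt.show()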