def __init__(self, modern_dem_name, outlet_id, chi_mask_dem_name=None,
             from_file=None):
    """Initialize MetricCalculator with names of postglacial and modern DEMs.

    Parameters
    ----------
    modern_dem_name : str
        Name/path of the modern DEM (data or model output).
    outlet_id : int
        Node ID of the watershed outlet used to set boundary conditions.
    chi_mask_dem_name : str, optional
        Name/path of a DEM whose positive cells define the till mask.
    from_file : str, optional
        If given, skip all grid work and restore previously saved metrics
        from this file instead.
    """
    if from_file is None:
        # Read and remember the modern DEM (whether data or model).
        # read_topography is a method on this class returning (grid, z).
        (self.grid, self.z) = self.read_topography(modern_dem_name)
        self.grid.set_watershed_boundary_condition_outlet_id(
            outlet_id, self.z, nodata_value=-9999)
        # Instantiate and run a FlowRouter and lake filler, so we get
        # drainage area for cumulative-area statistic, and also fields
        # for chi.
        fr = FlowRouter(self.grid)
        dfr = DepressionFinderAndRouter(self.grid)
        fr.route_flow()
        dfr.map_depressions()
        # Remember modern drainage area grid (a view onto the node field).
        self.area = self.grid.at_node['drainage_area']
        # Instantiate a ChiFinder for chi-index.
        self.chi_finder = ChiFinder(self.grid, min_drainage_area=10000.,
                                    reference_concavity=0.5)
        # Boolean array flagging core (non-boundary) nodes.
        core_nodes = np.zeros(self.area.shape, dtype=bool)
        core_nodes[self.grid.core_nodes] = True
        # Read and remember the MASK, if provided.
        if chi_mask_dem_name is None:
            # No mask DEM: channel mask is simply area > 1e5; till mask
            # covers all core nodes.
            self.mask = (self.area > 1e5)
            self.till_mask = np.zeros(self.mask.shape, dtype=bool)
            self.till_mask[self.grid.core_nodes] = 1
        else:
            # Mask DEM given: positive elevations in it define the till
            # extent; the channel mask is restricted to that extent.
            (self.mask_grid, zmask) = self.read_topography(chi_mask_dem_name)
            mask = (zmask > 0) * 1
            self.mask = (self.area > 1e5) * (mask == 1)
            mask_bool = (zmask > 0)
            self.till_mask = np.zeros(self.mask.shape, dtype=bool)
            # boolean AND of mask and core nodes via elementwise product
            self.till_mask[mask_bool * core_nodes] = 1
        # Create dictionary to contain metrics.
        self.metric = {}
    else:
        # Restore a previously saved run: read metrics dict from file.
        # NOTE(review): `load` is imported elsewhere in this module --
        # presumably pickle/yaml/json load; confirm against the imports.
        with open(from_file, 'r') as f:
            metrics = load(f)
        self.modern_dem_name = metrics.pop('Topo file')
        self.metric = metrics
        # Derive the companion chi-density filename by swapping the
        # extension for '.chi.txt'.
        fn_split = from_file.split('.')
        fn_split[-1] = 'chi'
        fn_split.append('txt')
        chi_filename = '.'.join(fn_split)
        self.density_chi = np.loadtxt(chi_filename)
def test_track_source():
    """Check flow receivers and upstream HSD source tracking on a 5x5 grid."""
    mg = RasterModelGrid((5, 5), spacing=(1., 1.))
    elev = np.array([5., 5., 5., 5., 5.,
                     5., 4., 5., 1., 5.,
                     0., 3., 5., 3., 0.,
                     5., 4., 5., 2., 5.,
                     5., 5., 5., 5., 5.])
    mg.at_node['topographic__elevation'] = elev
    # open two outlet nodes on the left and right edges
    for outlet in (10, 14):
        mg.status_at_node[outlet] = 0
    FlowRouter(mg).route_flow()
    receivers = mg.at_node['flow__receiver_node']
    assert_equal(receivers[6], 10)
    assert_equal(receivers[7], 8)
    assert_equal(receivers[18], 14)
    # HSD ids: 1 everywhere except two strips of 0s in the top rows
    hsd_ids = np.ones(mg.number_of_nodes, dtype=int)
    hsd_ids[2:5] = 0
    hsd_ids[7:10] = 0
    hsd_upstr, flow_accum = track_source(mg, hsd_ids)
    assert_equal(hsd_upstr[8], [1, 0, 0])
    assert_equal(hsd_upstr[14], [1, 1, 1, 1, 0, 0, 1])
    assert_equal(flow_accum[14], 7)
def test_find_unique_upstream_hsd_ids_and_fractions():
    """Check the unique upstream HSD ids and their fractional weights."""
    mg = RasterModelGrid((5, 5), spacing=(1., 1.))
    mg.at_node['topographic__elevation'] = np.array(
        [5., 5., 5., 5., 5.,
         5., 4., 5., 1., 5.,
         0., 3., 5., 3., 0.,
         5., 4., 5., 2., 5.,
         5., 5., 5., 5., 5.])
    mg.status_at_node[10] = 0
    mg.status_at_node[14] = 0
    FlowRouter(mg).route_flow()
    # HSD ids: 1 everywhere except two strips of 0s in the top rows
    hsd_ids = np.ones(mg.number_of_nodes, dtype=int)
    hsd_ids[2:5] = 0
    hsd_ids[7:10] = 0
    hsd_upstr, _ = track_source(mg, hsd_ids)
    uniq_ids, coeff = find_unique_upstream_hsd_ids_and_fractions(hsd_upstr)
    # node 8 receives 1/3 from id 0 and 2/3 from id 1
    np.testing.assert_almost_equal(
        np.sort(np.array(coeff[8])), np.array([0.33333333, 0.66666667]))
def test_find_unique_upstream_hsd_ids_and_fractions():
    """Verify upstream HSD fractional contributions at node 8."""
    grid = RasterModelGrid((5, 5), spacing=(1., 1.))
    topo = [5., 5., 5., 5., 5.,
            5., 4., 5., 1., 5.,
            0., 3., 5., 3., 0.,
            5., 4., 5., 2., 5.,
            5., 5., 5., 5., 5.]
    grid.at_node['topographic__elevation'] = np.array(topo)
    # two open outlets on the left and right edges
    grid.status_at_node[10] = 0
    grid.status_at_node[14] = 0
    router = FlowRouter(grid)
    router.route_flow()
    hsd_ids = np.empty(grid.number_of_nodes, dtype=int)
    hsd_ids.fill(1)
    hsd_ids[2:5] = 0
    hsd_ids[7:10] = 0
    hsd_upstr, flow_accum = track_source(grid, hsd_ids)
    uniq_ids, coeff = find_unique_upstream_hsd_ids_and_fractions(hsd_upstr)
    expected = np.array([0.33333333, 0.66666667])
    np.testing.assert_almost_equal(np.sort(np.array(coeff[8])), expected)
def test_track_source():
    """Exercise track_source() on a 5x5 grid with two open outlets."""
    grid = RasterModelGrid((5, 5), spacing=(1., 1.))
    elevation = np.array([5., 5., 5., 5., 5.,
                          5., 4., 5., 1., 5.,
                          0., 3., 5., 3., 0.,
                          5., 4., 5., 2., 5.,
                          5., 5., 5., 5., 5.])
    grid.at_node['topographic__elevation'] = elevation
    grid.status_at_node[10] = 0
    grid.status_at_node[14] = 0
    FlowRouter(grid).route_flow()
    receiver = grid.at_node['flow__receiver_node']
    # interior nodes drain toward the two open edge nodes
    assert receiver[6] == 10
    assert receiver[7] == 8
    assert receiver[18] == 14
    hsd_ids = np.ones(grid.number_of_nodes, dtype=int)
    hsd_ids[2:5] = 0
    hsd_ids[7:10] = 0
    hsd_upstr, flow_accum = track_source(grid, hsd_ids)
    assert hsd_upstr[8] == [1, 0, 0]
    assert hsd_upstr[14] == [1, 1, 1, 1, 0, 0, 1]
    assert flow_accum[14] == 7
# --- block/bedrock erosion parameters ---
side_length = 4  # m, block side length
tau_c_br = 10  # Pa, for now
a1 = 6.5
a2 = 2.5
d = 0.1  # z0
tol = 0.01  # m water thickness error allowable

elapsed_time = 0.
keep_running = True
counter = 0  # simple incremented counter to let us see the model advance

# Main time loop: advance until `runtime` is reached, clipping the final
# step so the run lands exactly on `runtime`.
while keep_running:
    if elapsed_time + dt > runtime:
        dt = runtime - elapsed_time
        keep_running = False
    # route flow to determine water volume flux in each cell
    _ = fr.route_flow()  # route_flow isn't time sensitive, so it doesn't take dt as input
    # Lake filling: map_depressions raises AssertionError when it exceeds
    # its iteration cap; report diagnostics but keep going (best-effort).
    # FIX: the old Python 2 `print x` statements were a syntax error under
    # Python 3; converted to the function form, valid on both 2 and 3.
    try:
        _ = lf.map_depressions()
    except AssertionError:
        print('Be careful, depression filler took too many iterations')
        print(counter)
        print(np.amin(mg['node']['topographic__elevation']))
        print('-----')
    # turn volume flux into specific discharge by dividing by channel width
    water_vol_flux = mg['node']['water__volume_flux']  # / 365 / 24 / 3600 #drainage area times rain rate
    water_specific_q = water_vol_flux / channel_width
def test_composite_pits():
    """
    A test to ensure the component correctly handles cases where there are
    multiple pits, inset into each other.
    """
    mg = RasterModelGrid(10, 10, 1.)
    # a sloping plane
    z = mg.add_field("node", "topographic__elevation", mg.node_x.copy())
    # punch one big hole
    z.reshape((10, 10))[3:8, 3:8] = 0.
    # dig a couple of inset holes
    z[57] = -1.
    z[44] = -2.
    z[54] = -10.
    # make an outlet
    z[71] = 0.9
    fr = FlowRouter(mg)
    lf = DepressionFinderAndRouter(mg)
    fr.route_flow()
    lf.map_depressions()
    # only the boundary nodes should be flagged as sinks after rerouting
    flow_sinks_target = np.zeros(100, dtype=bool)
    flow_sinks_target[mg.boundary_nodes] = True
    # no internal sinks now:
    assert_array_equal(mg.at_node["flow__sink_flag"], flow_sinks_target)

    # test conservation of mass: total area draining to the left column of
    # core nodes equals the 8x8 core-node block
    assert mg.at_node["drainage_area"].reshape((10, 10))[1:-1, 1].sum() == approx(
        8. ** 2
    )  # ^all the core nodes

    # test the actual flow field (drainage areas after depression rerouting):
    nA = np.array(
        [
            0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
            8., 8., 7., 6., 5., 4., 3., 2., 1., 0.,
            1., 1., 1., 1., 1., 1., 1., 1., 1., 0.,
            1., 1., 1., 4., 2., 2., 6., 4., 1., 0.,
            1., 1., 1., 6., 3., 12., 3., 2., 1., 0.,
            1., 1., 1., 8., 20., 4., 3., 2., 1., 0.,
            1., 1., 1., 35., 5., 4., 3., 2., 1., 0.,
            50., 50., 49., 13., 10., 8., 6., 4., 1., 0.,
            1., 1., 1., 1., 1., 1., 1., 1., 1., 0.,
            0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        ]
    )
    assert_array_equal(mg.at_node["drainage_area"], nA)

    # the lake code map: all three inset pits merge into one lake coded by
    # node 57 (XX marks non-lake nodes)
    lc = np.array(
        [
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
        ]
    )

    # test the remaining properties: a single lake draining through node 72
    assert lf.lake_outlets.size == 1
    assert lf.lake_outlets[0] == 72
    outlets_in_map = np.unique(lf.depression_outlet_map)
    assert outlets_in_map.size == 2
    assert outlets_in_map[1] == 72
    assert lf.number_of_lakes == 1
    assert lf.lake_codes[0] == 57
    assert_array_equal(lf.lake_map, lc)
    assert lf.lake_areas[0] == approx(25.)
    assert lf.lake_volumes[0] == approx(63.)
def test_three_pits():
    """
    A test to ensure the component correctly handles cases where there are
    multiple pits.
    """
    mg = RasterModelGrid(10, 10, 1.)
    # a sloping plane
    z = mg.add_field("node", "topographic__elevation", mg.node_x.copy())
    # punch some holes
    z[33] = 1.
    z[43] = 1.
    z[37] = 4.
    z[74:76] = 1.
    fr = FlowRouter(mg)
    lf = DepressionFinderAndRouter(mg)
    fr.route_flow()
    lf.map_depressions()
    # only the boundary nodes should be flagged as sinks after rerouting
    flow_sinks_target = np.zeros(100, dtype=bool)
    flow_sinks_target[mg.boundary_nodes] = True
    # no internal sinks now:
    assert_array_equal(mg.at_node["flow__sink_flag"], flow_sinks_target)

    # test conservation of mass: total area draining to the left column of
    # core nodes equals the 8x8 core-node block
    assert mg.at_node["drainage_area"].reshape((10, 10))[1:-1, 1].sum() == approx(
        8. ** 2
    )  # ^all the core nodes

    # test the actual flow field (expected drainage areas, row by row):
    nA = np.array(
        [
            0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
            8., 8., 7., 6., 5., 4., 3., 2., 1., 0.,
            2., 2., 1., 1., 2., 1., 1., 1., 1., 0.,
            26., 26., 25., 15., 11., 10., 9., 8., 1., 0.,
            2., 2., 1., 9., 2., 1., 1., 1., 1., 0.,
            2., 2., 1., 1., 5., 4., 3., 2., 1., 0.,
            2., 2., 1., 1., 1., 1., 3., 2., 1., 0.,
            20., 20., 19., 18., 17., 12., 3., 2., 1., 0.,
            2., 2., 1., 1., 1., 1., 3., 2., 1., 0.,
            0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
        ]
    )
    assert_array_equal(mg.at_node["drainage_area"], nA)

    # test a couple more properties: three lakes, coded by their pit nodes
    lc = np.empty(100, dtype=int)
    lc.fill(XX)
    lc[33] = 33
    lc[43] = 33  # nodes 33 and 43 merge into a single lake coded 33
    lc[37] = 37
    lc[74:76] = 74
    assert_array_equal(lf.lake_map, lc)
    assert_array_equal(lf.lake_codes, [33, 37, 74])
    assert lf.number_of_lakes == 3
    assert lf.lake_areas == approx([2., 1., 2.])
    assert lf.lake_volumes == approx([2., 2., 4.])
def test_degenerate_drainage():
    """
    This "hourglass" configuration should be one of the hardest to correctly
    re-route.
    """
    mg = RasterModelGrid(9, 5)
    z_init = mg.node_x.copy() * 0.0001 + 1.
    lake_pits = np.array([7, 11, 12, 13, 17, 27, 31, 32, 33, 37])
    z_init[lake_pits] = -1.
    z_init[22] = 0.  # the common spill pt for both lakes
    z_init[21] = 0.1  # an adverse bump in the spillway
    z_init[20] = -0.2  # the spillway
    z = mg.add_field("node", "topographic__elevation", z_init)
    fr = FlowRouter(mg)
    lf = DepressionFinderAndRouter(mg)
    fr.route_flow()
    lf.map_depressions()

    # expected drainage areas after depression rerouting (row by row):
    correct_A = np.array(
        [
            0., 0., 0., 0., 0.,
            0., 1., 3., 1., 0.,
            0., 2., 4., 2., 0.,
            0., 1., 10., 1., 0.,
            21., 21., 1., 1., 0.,
            0., 1., 9., 1., 0.,
            0., 2., 2., 2., 0.,
            0., 1., 1., 1., 0.,
            0., 0., 0., 0., 0.,
        ]
    )

    # FIX: ndarray.sort() sorts in place and returns None, so the original
    # `np.concatenate((lake_pits, [22])).sort()` bound None to `thelake`.
    # Use np.sort() to actually get the sorted node array.
    thelake = np.sort(np.concatenate((lake_pits, [22])))

    assert mg.at_node["drainage_area"] == approx(correct_A)
def test_edge_draining(): """ This tests when the lake attempts to drain from an edge, where an issue is suspected. """ # Create a 7x7 test grid with a well defined hole in it, AT THE EDGE. mg = RasterModelGrid((7, 7), (1., 1.)) z = mg.node_x.copy() guard_sides = np.concatenate((np.arange(7, 14), np.arange(35, 42))) edges = np.concatenate((np.arange(7), np.arange(42, 49))) hole_here = np.array(([15, 16, 22, 23, 29, 30])) z[guard_sides] = z[13] z[edges] = -2. # force flow outwards from the tops of the guards z[hole_here] = -1. A_new = np.array( [ [ [ 0., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 0., 15., 5., 4., 3., 2., 1., 0., 0., 10., 4., 3., 2., 1., 0., 0., 1., 4., 3., 2., 1., 0., 0., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 0., ] ] ] ).flatten() depr_outlet_target = np.array( [ XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, 14, 14, XX, XX, XX, XX, XX, 14, 14, XX, XX, XX, XX, XX, 14, 14, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, ] ).flatten() mg.add_field("node", "topographic__elevation", z, units="-") fr = FlowRouter(mg) lf = DepressionFinderAndRouter(mg) fr.route_flow() lf.map_depressions() assert mg.at_node["drainage_area"] == approx(A_new) assert lf.depression_outlet_map == approx(depr_outlet_target)
extend_t[0:lat.shape[0] - 1, lon.shape[0] * 2:lon.shape[0] * 3] = ice_topo_f[0:lat.shape[0] - 1, :] # Top right extend_t[lat.shape[0] - 1:lat.shape[0] * 2 - 2, 0:lon.shape[0] * 3] = np.fliplr( np.flipud(extend_t[0:lat.shape[0] - 1, 0:lon.shape[0] * 3])) # pad bottom # Create raster grid for flow calculation and add data fg = RasterModelGrid((extend_t.shape[0], extend_t.shape[1]), spacing=(1, 1)) _ = fg.add_field('node', 'topographic__elevation', extend_t) fg.set_closed_boundaries_at_grid_edges(False, False, False, False) # Calculate flow fields fr = FlowRouter(fg) fg = fr.route_flow() # Preview the flow field with this plotting func. # drainage_plot(fg, title='Grid 2 using FlowDirectorD8') # Fill in Lakes/depressions fg.at_node['flow__sink_flag'][ fg.core_nodes].sum() # how many depressions do we have? hf = SinkFiller(fg, apply_slope=True) hf.run_one_step() fr.run_one_step() # drainage_plot(fg, title='Grid 2 using FlowDirectorD8') # Output is a single vector flow_rec = fg.at_node['flow__receiver_node'] # Convert to a 'tocell' field
# --- block/bedrock erosion parameters ---
side_length = 4  # m, block side length
tau_c_br = 10  # Pa, for now
a1 = 6.5
a2 = 2.5
d = 0.1  # z0
tol = 0.01  # m water thickness error allowable

elapsed_time = 0.
keep_running = True
counter = 0  # simple incremented counter to let us see the model advance

# Main time loop: advance until `runtime` is reached, clipping the final
# step so the run lands exactly on `runtime`.
while keep_running:
    if elapsed_time + dt > runtime:
        dt = runtime - elapsed_time
        keep_running = False
    # route flow to determine water volume flux in each cell
    _ = fr.route_flow()  # route_flow isn't time sensitive, so it doesn't take dt as input
    # Lake filling: map_depressions raises AssertionError when it exceeds
    # its iteration cap; report diagnostics but keep going (best-effort).
    # FIX: the old Python 2 `print x` statements were a syntax error under
    # Python 3; converted to the function form, valid on both 2 and 3.
    try:
        _ = lf.map_depressions()
    except AssertionError:
        print('Be careful, depression filler took too many iterations')
        print(counter)
        print(np.amin(mg['node']['topographic__elevation']))
        print('-----')
    # turn volume flux into specific discharge by dividing by channel width
    water_vol_flux = mg['node']['water__volume_flux']  # / 365 / 24 / 3600 #drainage area times rain rate
# Display initialization message print('Running ...') #instantiate the components: pr = PrecipitationDistribution(input_file) fr = Flow(mg) sp = Fsc(mg, input_file) hd = Diff(mg, input_file) ####################RUN track_uplift = 0 #track cumulative uplift to know top of hard layer last_trunc = runtime for (interval_duration, rainfall_rate) in pr.yield_storm_interstorm_duration_intensity(): if rainfall_rate != 0.: # note diffusion also only happens when it's raining... _ = fr.route_flow() _ = sp.erode(mg, interval_duration, K_if_used='K_values') _ = hd.diffuse(interval_duration) track_uplift += uplift_rate * interval_duration #top of beginning surface mg.at_node['topographic__elevation'][mg.core_nodes] += uplift_rate * interval_duration this_trunc = pr.elapsed_time // t_plot if this_trunc != last_trunc: # time to plot a new profile! print ('Time %d' % (t_plot * this_trunc)) last_trunc = this_trunc else: pass #check where hard rocks and soft rocks are, change k to reflect this if hard_layer_on_or_off == 1: #if using layers hard_layer = np.where(mg.at_node['topographic__elevation'] >= track_uplift - hard_layer_thickness) soft_layer = np.where(mg.at_node['topographic__elevation'] < track_uplift - hard_layer_thickness)
def extend_perturbed_runs(total_iters_to_reach=0):
    """Load all perturbed runs in current folder, and extend them.

    Function should be called from within an experiment folder (extend all
    perturbations for all starting uplift rates), an 'uplift_rate_XXXX'
    folder (extend all perturbations for this rate) or an 'accel_XXX'
    folder (extend this accel only). Does NOT create a new expt or run ID,
    just extends the old ones. Adds a text file annotating what has
    happened.

    Parameters
    ----------
    total_iters_to_reach : int
        Iteration count each run should be extended to; folders already at
        or beyond this (minus one output interval) are skipped.
    """
    # look for the params to use. Also tells us where we are in the
    # hierarchy: walk up until the paramdict file is found.
    level = 0  # 0: top, 1: uplift, 2: accel:
    cwd = os.getcwd()
    while True:
        try:
            paramdict = np.load('expt_ID_paramdict.npy').item()
        except IOError:
            os.chdir('..')
            level += 1
        else:
            break
    # now back to where we started in the dir str:
    os.chdir(cwd)
    if level == 2:  # in accel_ folder
        # get the accel that this is:
        accel_factors = [get_float_of_folder_name(), ]
        # get the U of the host folder:
        uplift_rates = [get_float_of_folder_name(directory=(cwd + '/..')), ]
        wd_stub = os.path.abspath(os.getcwd() + '/../..')
    elif level == 1:  # in uplift_ folder
        accel_fnames = [filename for filename in os.listdir('.')
                        if filename.startswith('accel_')]
        accel_factors = [get_float_of_folder_name(directory=(cwd + '/' +
                                                             filename))
                         for filename in accel_fnames]
        uplift_rates = [get_float_of_folder_name(), ]
        wd_stub = os.path.abspath(os.getcwd() + '/..')
    elif level == 0:  # in top folder
        uplift_fnames = [filename for filename in os.listdir('.')
                         if filename.startswith('uplift_rate_')]
        uplift_rates = [get_float_of_folder_name(directory=(cwd + '/' +
                                                            filename))
                        for filename in uplift_fnames]
        accel_factors = paramdict['accel_factors']
        wd_stub = os.path.abspath(os.getcwd())
    for uplift_rate in uplift_rates:
        for accel_factor in accel_factors:
            wd = (wd_stub + '/uplift_rate_' + str(uplift_rate) + '/accel_' +
                  str(accel_factor))
            # get the saved filenames that already exist in this folder:
            runnames = [filename for filename in os.listdir(wd)
                        if filename.startswith('topographic__elevation')]
            seddepthnames = [filename for filename in os.listdir(wd)
                             if filename.startswith('channel_sediment__depth')]
            # as elsewhere, the final entry is the last run, so --
            # establish the loop number of that run:
            run_ID = runnames[-1][-14:-4]  # is a str
            # count backwards through the filename to find how many digits
            # the iteration counter occupies (_format):
            _format = 0
            while True:
                char = runnames[-1][-16 - _format]
                try:
                    num = int(char)
                except ValueError:  # was a str
                    break
                else:
                    _format += 1
            finaliter = int(runnames[-1][(-15 - _format):-15])
            finalsediter = int(seddepthnames[-1][(-15 - _format):-15])
            assert finaliter == finalsediter  # ...just in case
            # test we need to actually do more runs:
            if total_iters_to_reach < finaliter + paramdict['out_interval']:
                continue
            # check we aren't going to have a "zero problem"; correct if we
            # do, by renaming every old savefile with extra leading zeros:
            max_zeros = len(str(total_iters_to_reach))
            if max_zeros + 1 > _format:  # less won't be possible from continue
                extra_zeros = max_zeros + 1 - _format
                for allfile in os.listdir(wd):
                    if allfile[-14:-4] == run_ID:
                        os.rename(
                            wd + '/' + allfile,
                            (wd + '/' + allfile[:(-15 - _format)] +
                             '0' * extra_zeros + allfile[(-15 - _format):]))
                # refresh the file lists after renaming:
                runnames = [filename for filename in os.listdir(wd)
                            if filename.startswith('topographic__elevation')]
                seddepthnames = [filename for filename in os.listdir(wd)
                                 if filename.startswith(
                                     'channel_sediment__depth')]
            if max_zeros + 1 < _format:
                max_zeros = _format - 1  # in case of any bonus 0s from old run
            # build the structures:
            mg = RasterModelGrid(paramdict['shape'], paramdict['dx'])
            for edge in (mg.nodes_at_left_edge, mg.nodes_at_top_edge,
                         mg.nodes_at_right_edge):
                mg.status_at_node[edge] = CLOSED_BOUNDARY
            z = mg.add_zeros('node', 'topographic__elevation')
            seddepth = mg.add_zeros('node', 'channel_sediment__depth')
            fr = FlowRouter(mg)
            eroder = SedDepEroder(mg, **paramdict)
            ld = LinearDiffuser(mg, **paramdict)
            # load the last available elev data:
            z[:] = np.loadtxt(wd + '/' + runnames[-1])
            seddepth[:] = np.loadtxt(wd + '/' + seddepthnames[-1])
            # save a note (append if the readme already exists)
            try:
                appendfile = open(wd + '/appended_run_readme.txt', 'a')
            except IOError:
                appendfile = open(wd + '/appended_run_readme.txt', 'w')
            appendfile.write('This run was appended at timestamp ' +
                             str(int(time.time())) + '.\n')
            appendfile.write('New loops were added from iteration ' +
                             str(finaliter) + ' and terminated at iteration ' +
                             str(total_iters_to_reach) + '.\n\n')
            appendfile.close()
            # get runnin'
            print('Extending uplift ' + str(uplift_rate) + ' accel ' +
                  str(accel_factor) + ' from iter number ' + str(finaliter))
            dt = paramdict['dt']
            # NOTE(review): xrange is Python 2-only; also `out_interval` and
            # `out_fields` here are presumably module-level globals (the loop
            # guard above uses paramdict['out_interval'] instead) -- confirm.
            for i in xrange(finaliter + 1, total_iters_to_reach):
                fr.route_flow()
                eroder.run_one_step(dt)
                ld.run_one_step(dt)
                z[mg.core_nodes] += accel_factor * uplift_rate * dt
                print(i)
                if i % out_interval == 0:
                    zeros_to_add = max_zeros - len(str(i)) + 1
                    # note an OoM buffer! Just to be safe
                    if zeros_to_add < 0:
                        # ...just in case, though should never happen
                        print('Problem allocating zeros on savefiles')
                    ilabel = '0' * zeros_to_add + str(i)
                    identifier = ilabel + '_' + str(run_ID)
                    for field in out_fields:
                        np.savetxt(wd + '/' + field + '_' + identifier +
                                   '.txt', mg.at_node[field])
# Build a random initial surface with closed left/right edges.
z = mg.add_zeros('node', 'topographic__elevation')
z += np.random.rand(mg.number_of_nodes)
mg.status_at_node[mg.nodes_at_left_edge] = CLOSED_BOUNDARY
mg.status_at_node[mg.nodes_at_right_edge] = CLOSED_BOUNDARY
fr = FlowRouter(mg)
sp = FastscapeEroder(mg, K_sp=1.e-5)
sf = SteepnessFinder(mg, min_drainage_area=1.e5)
dt = 20000.
# NOTE(review): loop extent reconstructed from a collapsed source -- the
# steepness calculation and plotting are taken to run once, after the
# 100-step erosion/uplift loop; confirm against the original. xrange is
# Python 2-only.
for i in xrange(100):
    print(i)
    fr.route_flow()
    sp.run_one_timestep(dt)
    # uniform uplift of the core nodes each step
    mg.at_node['topographic__elevation'][mg.core_nodes] += 1.
sf.calculate_steepnesses()
# mask out a two-node-wide rim plus any hillslope nodes
edges = mg.ones('node', dtype=bool)
edges.reshape(mg.shape)[2:-2, 2:-2] = False
steepness_mask = np.logical_or(sf.hillslope_mask, edges)
steepnesses = np.ma.array(mg.at_node['channel__steepness_index'],
                          mask=steepness_mask)
imshow_grid_at_node(mg, 'topographic__elevation')
imshow_grid_at_node(mg, steepnesses, color_for_closed=None, cmap='winter')
plt.show()
dt = 10000.