def test_2_library_instances(bliss_tango_server, s1hg, s1f, s1b):
    s1hg.dial(1)
    s1hg.position(1)

    assert s1f.position() == 0.5
    assert s1b.position() == 0.5
    assert s1hg.position() == 1

    dev_name, proxy = bliss_tango_server

    tango_s1hg = DeviceProxy("tango://localhost:12345/id00/bliss_test/s1hg")

    assert tango_s1hg.position == 1
    assert tango_s1hg.offset == 0

    s1f.velocity(0.1)
    s1b.velocity(0.1)

    eval_id = proxy.eval("(s1f.velocity(), s1b.velocity())")
    gevent.sleep(0.1)
    res = proxy.get_result(eval_id)
    assert decode_tango_eval(res) == (0.1, 0.1)

    # trigger move
    tango_s1hg.position = 2

    gevent.sleep(0.1)
    assert s1hg.state() == "MOVING"

    s1hg.wait_move()

    # note: `assert pytest.approx(a, b)` is always truthy; the comparison
    # must be written with `==` for approx to take effect
    assert s1hg.position() == pytest.approx(2)

    s1hg.rmove(1)

    assert tango_s1hg.position == pytest.approx(3)
def test_colorscale(file_3nob):
    m = molecule.load("mae", file_3nob)
    r = molrep.addrep(color="User2", selection="lipid", molid=m)
    assert molrep.get_scaleminmax(m, r) == pytest.approx((0., 0.))

    with pytest.raises(ValueError):
        molrep.get_scaleminmax(m+1, 0)
    with pytest.raises(ValueError):
        molrep.get_scaleminmax(m, r+1)

    molrep.set_scaleminmax(molid=m, rep=r, scale_min=-10., scale_max=200.)
    assert molrep.get_scaleminmax(m, r) == pytest.approx((-10., 200.))

    with pytest.raises(ValueError):
        molrep.set_scaleminmax(m+1, 0, 0, 12)
    with pytest.raises(ValueError):
        molrep.set_scaleminmax(m, r+1, 12, 13)
    with pytest.raises(RuntimeError):
        molrep.set_scaleminmax(m, r, scale_min=100, scale_max=0)

    # Test reset
    molrep.reset_scaleminmax(molid=m, rep=r)
    assert molrep.get_scaleminmax(m, r) == pytest.approx((-10., 200.))
    with pytest.raises(ValueError):
        molrep.reset_scaleminmax(m+1, 0)
    with pytest.raises(ValueError):
        molrep.reset_scaleminmax(m, r+1)

    # Test changing with modrep
    assert molrep.modrep(m, r, scaleminmax=(2.0, 3.0))
    assert molrep.get_scaleminmax(m, r) == pytest.approx((2.0, 3.0))
    assert molrep.modrep(m, r, scaleminmax=[-10., -5.])
    assert molrep.get_scaleminmax(m, r) == pytest.approx((-10., -5.))

    molecule.delete(m)
def test_drag_and_drop(session, test_actions_page, mouse_chain, dx, dy, drag_duration):
    drag_target = session.find.css("#dragTarget", all=False)
    initial_rect = drag_target.rect
    initial_center = get_inview_center(initial_rect, get_viewport_rect(session))

    # Conclude chain with extra move to allow time for last queued
    # coordinate-update of drag_target and to test that drag_target is "dropped".
    mouse_chain \
        .pointer_move(0, 0, origin=drag_target) \
        .pointer_down() \
        .pointer_move(dx, dy, duration=drag_duration, origin="pointer") \
        .pointer_up() \
        .pointer_move(80, 50, duration=100, origin="pointer") \
        .perform()

    # mouseup that ends the drag is at the expected destination
    e = get_events(session)[1]
    assert e["type"] == "mouseup"
    # `assert pytest.approx(a, b)` is always truthy; approx only takes effect
    # in an `==` comparison
    assert e["pageX"] == pytest.approx(initial_center["x"] + dx)
    assert e["pageY"] == pytest.approx(initial_center["y"] + dy)

    # check resulting location of the dragged element
    final_rect = drag_target.rect
    assert initial_rect["x"] + dx == final_rect["x"]
    assert initial_rect["y"] + dy == final_rect["y"]
def test_D8_D4_fill(d4_grid):
    """Tests the functionality of D4 filling."""
    d4_grid.lfD8.map_depressions(pits=None, reroute_flow=False)
    d4_grid.lfD4.map_depressions(pits=None, reroute_flow=False)
    assert d4_grid.lfD8.number_of_lakes == 1
    assert d4_grid.lfD4.number_of_lakes == 3

    correct_D8_lake_map = np.empty(7 * 7, dtype=int)
    correct_D8_lake_map.fill(XX)
    correct_D8_lake_map[d4_grid.lake_nodes] = 10
    correct_D4_lake_map = correct_D8_lake_map.copy()
    correct_D4_lake_map[d4_grid.lake_nodes[5:]] = 32
    correct_D4_lake_map[d4_grid.lake_nodes[-2]] = 38
    correct_D8_depths = np.zeros(7 * 7, dtype=float)
    correct_D8_depths[d4_grid.lake_nodes] = 2.
    correct_D4_depths = correct_D8_depths.copy()
    correct_D4_depths[d4_grid.lake_nodes[5:]] = 4.
    correct_D4_depths[d4_grid.lake_nodes[-2]] = 3.

    assert_array_equal(d4_grid.lfD8.lake_map, correct_D8_lake_map)
    assert_array_equal(d4_grid.lfD4.lake_map, correct_D4_lake_map)

    assert d4_grid.mg1.at_node["depression__depth"] == approx(correct_D8_depths)
    assert d4_grid.mg2.at_node["depression__depth"] == approx(correct_D4_depths)
def test_uses_named_inputs(self):
    inputs = {
        "premise": "I always write unit tests for my code.",
        "hypothesis": "One time I didn't write any unit tests for my code."
    }

    archive = load_archive(self.FIXTURES_ROOT / 'decomposable_attention' /
                           'serialization' / 'model.tar.gz')
    predictor = Predictor.from_archive(archive, 'textual-entailment')
    result = predictor.predict_json(inputs)

    # Label probs should be 3 floats that sum to one
    label_probs = result.get("label_probs")
    assert label_probs is not None
    assert isinstance(label_probs, list)
    assert len(label_probs) == 3
    assert all(isinstance(x, float) for x in label_probs)
    assert all(x >= 0 for x in label_probs)
    assert sum(label_probs) == approx(1.0)

    # Logits should be 3 floats that softmax to label_probs
    label_logits = result.get("label_logits")
    assert label_logits is not None
    assert isinstance(label_logits, list)
    assert len(label_logits) == 3
    assert all(isinstance(x, float) for x in label_logits)

    exps = [math.exp(x) for x in label_logits]
    sumexps = sum(exps)
    for e, p in zip(exps, label_probs):
        assert e / sumexps == approx(p)
def test_activity_pv_head(next100pv):
    pv = next100pv
    activity_bi214 = pv.head_mass * pv.cv.head.material.mass_activity_bi214
    activity_tl208 = pv.head_mass * pv.cv.head.material.mass_activity_tl208
    assert pv.head_activity_bi214 == approx(activity_bi214, rel=1e-5)
    assert pv.head_activity_tl208 == approx(activity_tl208, rel=1e-5)
def test_int(self):
    within_1e6 = [(1000001, 1000000), (-1000001, -1000000)]
    for a, x in within_1e6:
        assert a == approx(x, rel=5e-6, abs=0)
        assert a != approx(x, rel=5e-7, abs=0)
        assert approx(x, rel=5e-6, abs=0) == a
        assert approx(x, rel=5e-7, abs=0) != a
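# With abs=0 the comparison above is purely relative: pytest.approx accepts a
# difference of at most rel * abs(expected). The pairs differ by exactly 1,
# so rel=5e-6 gives a tolerance of 5.0 (equal) while rel=5e-7 gives only 0.5
# (not equal). The same arithmetic, spelled out as a minimal sketch:
#
#     assert abs(1000001 - 1000000) <= 5e-6 * 1000000      # 1 <= 5.0, passes
#     assert not abs(1000001 - 1000000) <= 5e-7 * 1000000  # 1 > 0.5, fails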
def test_stratified_splitter(test_specs, spark_dataset):
    splits = spark_stratified_split(
        spark_dataset, ratio=test_specs["ratio"], filter_by="user", min_rating=10
    )

    assert splits[0].count() / test_specs["number_of_rows"] == pytest.approx(
        test_specs["ratio"], test_specs["tolerance"]
    )
    assert splits[1].count() / test_specs["number_of_rows"] == pytest.approx(
        1 - test_specs["ratio"], test_specs["tolerance"]
    )

    # Both splits should contain the same set of users; preserving each
    # user's presence across splits is what makes the split stratified.
    users_train = (
        splits[0].select(DEFAULT_USER_COL).distinct().rdd.map(lambda r: r[0]).collect()
    )
    users_test = (
        splits[1].select(DEFAULT_USER_COL).distinct().rdd.map(lambda r: r[0]).collect()
    )
    assert set(users_train) == set(users_test)

    splits = spark_stratified_split(spark_dataset, ratio=test_specs["ratios"])

    assert splits[0].count() / test_specs["number_of_rows"] == pytest.approx(
        test_specs["ratios"][0], test_specs["tolerance"]
    )
    assert splits[1].count() / test_specs["number_of_rows"] == pytest.approx(
        test_specs["ratios"][1], test_specs["tolerance"]
    )
    assert splits[2].count() / test_specs["number_of_rows"] == pytest.approx(
        test_specs["ratios"][2], test_specs["tolerance"]
    )
def test_pv_properties(next100pv):
    pv = next100pv
    assert pv.name == 'Next100PV'
    assert pv.material_name == ti316.name
    assert pv.radius / mm == approx(1360 / 2, rel=1e-3)
    assert pv.body_thickness / mm == approx(10, rel=1e-3)
    assert pv.head_thickness / mm == approx(12, rel=1e-3)
def test_radial_deprecate_origin_y():
    with pytest.deprecated_call():
        mg = RadialModelGrid(num_shells=1, dr=1.0, origin_y=10)
    assert mg._xy_of_center == (0.0, 10.0)
    pts, npts = mg._create_radial_points(1, 1, xy_of_center=mg._xy_of_center)
    assert pts[0, 0] == approx(mg._xy_of_center[0])
    assert pts[0, 1] == approx(mg._xy_of_center[1])
def test_random_splitter(test_specs, spark_dataset):
    """Test random splitter for Spark dataframes.

    NOTE: some split results may not match the ratios exactly, owing to the
    limited number of rows in the testing data. An approximate match with a
    certain level of tolerance is therefore used instead.
    """
    splits = spark_random_split(
        spark_dataset, ratio=test_specs["ratio"], seed=test_specs["seed"]
    )

    assert splits[0].count() / test_specs["number_of_rows"] == pytest.approx(
        test_specs["ratio"], test_specs["spark_randomsplit_tolerance"]
    )
    assert splits[1].count() / test_specs["number_of_rows"] == pytest.approx(
        1 - test_specs["ratio"], test_specs["spark_randomsplit_tolerance"]
    )

    splits = spark_random_split(
        spark_dataset, ratio=test_specs["ratios"], seed=test_specs["seed"]
    )

    assert splits[0].count() / test_specs["number_of_rows"] == pytest.approx(
        test_specs["ratios"][0], test_specs["spark_randomsplit_tolerance"]
    )
    assert splits[1].count() / test_specs["number_of_rows"] == pytest.approx(
        test_specs["ratios"][1], test_specs["spark_randomsplit_tolerance"]
    )
    assert splits[2].count() / test_specs["number_of_rows"] == pytest.approx(
        test_specs["ratios"][2], test_specs["spark_randomsplit_tolerance"]
    )
def test_vec2d_magic_math():
    import operator

    params = (
        (cy.Vec2d(2.1, 7), 3.2),
        (cy.Vec2d(2.3, 7), (3., 5.7)),
        (cy.Vec2d(-2.3, 7), cy.Vec2d(0.3, 5.7)),
    )
    # operator.truediv and operator.floordiv cover division here;
    # operator.div existed only under Python 2.
    ops = (
        operator.add,
        operator.sub,
        operator.mul,
        operator.floordiv,
        operator.truediv,
        operator.mod,
    )
    for left, right in params:
        tright = right if hasattr(right, '__getitem__') else (right, right)
        for op in ops:
            # test op(left, right)
            msg = "%s( %s , %s )" % (op.__name__, left, right)
            tres = (op(left.x, tright[0]), op(left.y, tright[1]))
            res = op(left, right)
            assert tuple(res) == pytest.approx(tres), msg

            # test op(right, left)
            msg = "%s( %s , %s )" % (op.__name__, right, left)
            tres = (op(tright[0], left.x), op(tright[1], left.y))
            res = op(right, left)
            assert tuple(res) == pytest.approx(tres), msg
def test_multidim_tendencies():
    # Same test just repeated in two parallel columns
    num_lat = 2
    state = climlab.column_state(num_lev=num_lev, num_lat=num_lat)
    state['q'] = state.Tatm * 0.  #+ Q
    state['U'] = state.Tatm * 0.  #+ U
    state['V'] = state.Tatm * 0.  #+ V
    for i in range(num_lat):
        state.Tatm[i, :] = T
        state['q'][i, :] += Q
        state['U'][i, :] += U
        state['V'][i, :] += V
    assert hasattr(state, 'Tatm')
    assert hasattr(state, 'q')
    assert hasattr(state, 'U')
    assert hasattr(state, 'V')

    conv = emanuel_convection.EmanuelConvection(state=state, timestep=DELT)
    conv.step_forward()

    # Did we get all the correct output?
    assert np.all(conv.IFLAG == 1)
    # relative tolerance for these tests ...
    tol = 1E-5
    assert np.all(conv.CBMF == pytest.approx(3.10377218E-02, rel=tol))
    tend = conv.tendencies
    assert np.tile(FT, (num_lat, 1)) == pytest.approx(tend['Tatm'], rel=tol)
    assert np.tile(FQ, (num_lat, 1)) == pytest.approx(tend['q'], rel=tol)
    assert np.tile(FU, (num_lat, 1)) == pytest.approx(tend['U'], rel=tol)
    assert np.tile(FV, (num_lat, 1)) == pytest.approx(tend['V'], rel=tol)
def test_decimal_width():
    obj = Round(0.02, 0.005)
    assert 0.005 == pytest.approx(obj(0.005))
    assert 0.025 == pytest.approx(obj(0.025))
    assert 0.065 == pytest.approx(obj(0.081))
    assert -0.055 == pytest.approx(obj(-0.048))
    assert -0.015 == pytest.approx(obj(-0.015))
def test_formatters(Formatter, regex, direction, factor, values):
    fmt = Formatter()
    result = fmt(direction, factor, values)

    prev_degree = prev_minute = prev_second = None
    for tick, value in zip(result, values):
        m = regex.match(tick)
        assert m is not None, '"%s" is not an expected tick format.' % (tick, )

        sign = sum(m.group(sign + '_sign') is not None
                   for sign in ('degree', 'minute', 'second'))
        assert sign <= 1, \
            'Only one element of tick "%s" may have a sign.' % (tick, )
        sign = 1 if sign == 0 else -1

        degree = float(m.group('degree') or prev_degree or 0)
        minute = float(m.group('minute') or prev_minute or 0)
        second = float(m.group('second') or prev_second or 0)
        if Formatter == FormatterHMS:
            # 360 degrees as plot range -> 24 hours as labelled range
            expected_value = pytest.approx((value // 15) / factor)
        else:
            expected_value = pytest.approx(value / factor)
        assert sign * dms2float(degree, minute, second) == expected_value, \
            '"%s" does not match expected tick value.' % (tick, )

        prev_degree = degree
        prev_minute = minute
        prev_second = second
def test_run_analytics_band_center_spectrum(expected_center, expected_wavelengths, expected_values):
    spectra = phat.Spectra.from_file(get_path('SP_2C_02_02358_S138_E3586.spc'))
    spectrum = spectra[spectra.columns[1]]
    center, center_fit = analytics.run_analytics(
        spectrum, analytics.band_center, 512.6, 2587.9)
    assert center_fit.mean() == pytest.approx(expected_center)
    assert np.mean(center[0]) == pytest.approx(expected_wavelengths)
    assert np.mean(center[1]) == pytest.approx(expected_values)
def test_seed():
    rand = Random()
    N = 10
    min = -10
    max = 10
    first = []
    second = []
    third = []
    tol = 1e-6

    # Record three draws for each seed value.
    for i in range(N):
        Random.setSeed(i)
        first.append(Random.uniform(min, max))
        second.append(Random.uniform(min, max))
        third.append(Random.uniform(min, max))

    # Re-seeding must reproduce the same sequence of draws.
    # (Use ==, not `is`: identity comparison of ints is an implementation
    # detail of CPython's small-int caching.)
    for i in range(N):
        Random.setSeed(i)
        assert Random.getSeed() == i
        assert Random.uniform(min, max) == pytest.approx(first[i], tol)
        assert Random.uniform(min, max) == pytest.approx(second[i], tol)
        assert Random.uniform(min, max) == pytest.approx(third[i], tol)
def test_element_wo():
    # This test doesn't require an OpenMC run. We just need to make sure the
    # element.expand() method expands elements with the proper nuclide
    # compositions.
    h_am = (NATURAL_ABUNDANCE['H1'] * atomic_mass('H1') +
            NATURAL_ABUNDANCE['H2'] * atomic_mass('H2'))
    o_am = (NATURAL_ABUNDANCE['O17'] * atomic_mass('O17') +
            (NATURAL_ABUNDANCE['O16'] + NATURAL_ABUNDANCE['O18']) *
            atomic_mass('O16'))
    water_am = 2 * h_am + o_am

    water = Material()
    water.add_element('O', o_am / water_am, 'wo')
    water.add_element('H', 2 * h_am / water_am, 'wo')
    densities = water.get_nuclide_densities()

    for nuc in densities.keys():
        assert nuc in ('H1', 'H2', 'O16', 'O17')

        if nuc in ('H1', 'H2'):
            val = 2 * NATURAL_ABUNDANCE[nuc] * atomic_mass(nuc) / water_am
            assert densities[nuc][1] == pytest.approx(val)
        if nuc == 'O16':
            val = (NATURAL_ABUNDANCE[nuc] + NATURAL_ABUNDANCE['O18']) \
                * atomic_mass(nuc) / water_am
            assert densities[nuc][1] == pytest.approx(val)
        if nuc == 'O17':
            val = NATURAL_ABUNDANCE[nuc] * atomic_mass(nuc) / water_am
            assert densities[nuc][1] == pytest.approx(val)
def test_run_analytics_band_minima(expected_wavelengths, expected_values):
    spectra = phat.Spectra.from_file(get_path('SP_2C_02_02358_S138_E3586.spc'))
    minima = analytics.run_analytics(spectra, analytics.band_minima)
    wavelengths = [np.mean(val[0]) for val in minima]
    values = [val[1] for val in minima]
    assert np.mean(wavelengths) == pytest.approx(expected_wavelengths)
    assert np.mean(values) == pytest.approx(expected_values)
def test_voronoi_closedinternal():
    """Test routing on a (radial) voronoi, but with a closed interior node."""
    vmg = RadialModelGrid(2, dr=2.)
    z = np.full(20, 10., dtype=float)
    all_bounds_but_one = np.array((0, 1, 2, 3, 4, 7, 11, 15, 16, 17, 18, 19))
    vmg.status_at_node[all_bounds_but_one] = CLOSED_BOUNDARY
    vmg.status_at_node[8] = CLOSED_BOUNDARY  # new internal closed
    z[12] = 0.  # outlet
    inner_elevs = (8., 7., 1., 6., 4., 5.)
    z[vmg.core_nodes] = np.array(inner_elevs)
    vmg.add_field("node", "topographic__elevation", z, units="-")

    fr = FlowRouter(vmg)

    cells_contributing = [
        np.array([0]),
        np.array([1]),
        np.array([0, 1, 3, 4, 5, 6]),
        np.array([1, 4]),
        np.array([1, 4, 5, 6]),
        np.array([1, 4, 6]),
    ]

    A_target_internal = np.zeros(vmg.number_of_core_nodes, dtype=float)
    for i in range(6):
        A_target_internal[i] = vmg.area_of_cell[cells_contributing[i]].sum()
    A_target_outlet = vmg.area_of_cell[vmg.cell_at_node[vmg.core_nodes]].sum()

    fr.route_flow()

    assert vmg.at_node["drainage_area"][vmg.core_nodes] == approx(A_target_internal)
    assert vmg.at_node["drainage_area"][12] == approx(A_target_outlet)
def test_intensity():
    from .. import toymodel

    np.random.seed(0)
    geom = CameraGeometry.from_name('LSTCam')

    width = 0.05
    length = 0.15
    intensity = 50

    # make a toymodel shower model
    model = toymodel.generate_2d_shower_model(
        centroid=(0.2, 0.3),
        width=width,
        length=length,
        psi='30d',
    )

    image, signal, noise = toymodel.make_toymodel_shower_image(
        geom, model.pdf, intensity=intensity, nsb_level_pe=5,
    )

    # test if signal reproduces given cog values
    assert np.average(geom.pix_x.value, weights=signal) == approx(0.2, rel=0.15)
    assert np.average(geom.pix_y.value, weights=signal) == approx(0.3, rel=0.15)

    # test if signal reproduces given width/length values
    cov = np.cov(geom.pix_x.value, geom.pix_y.value, aweights=signal)
    eigvals, eigvecs = np.linalg.eigh(cov)

    assert np.sqrt(eigvals[0]) == approx(width, rel=0.15)
    assert np.sqrt(eigvals[1]) == approx(length, rel=0.15)

    # test if total intensity is inside the central 90 percent interval
    # (between the 5th and 95th percentiles of the Poisson distribution)
    assert poisson(intensity).ppf(0.05) <= signal.sum() <= poisson(intensity).ppf(0.95)
def test_uniform1():
    """Test the uniform distribution."""
    for n in range(2, 10):
        d = uniform(n)
        assert d.outcomes == tuple(range(n))
        assert d[0] == pytest.approx(1/n)
        assert entropy(d) == pytest.approx(np.log2(n))
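# Each of the n outcomes of a uniform distribution has probability 1/n, so
# its Shannon entropy is -sum((1/n) * log2(1/n)) = log2(n) bits, which is
# exactly what the final assertion above checks.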
def test_ii3():
    """Test II and conditional II for xor."""
    d = Xor()
    ii1 = interaction_information(d, [[0], [1], [2]], [2])
    ii2 = interaction_information(d, [[0], [1]], [2])
    assert ii1 == pytest.approx(0)
    assert ii2 == pytest.approx(1)
def test_rotation_angle():
    assert Affine.identity().rotation_angle == 0.0
    assert Affine.scale(2).rotation_angle == 0.0
    assert Affine.scale(2, 1).rotation_angle == 0.0
    assert Affine.translation(32, -47).rotation_angle == pytest.approx(0.0)
    assert Affine.rotation(30).rotation_angle == pytest.approx(30)
    assert Affine.rotation(-150).rotation_angle == pytest.approx(-150)
def test_compute_rating_predictions(rating_true):
    svd = surprise.SVD()
    train_set = surprise.Dataset.load_from_df(
        rating_true, reader=surprise.Reader()
    ).build_full_trainset()
    svd.fit(train_set)

    preds = compute_rating_predictions(svd, rating_true)
    assert set(preds.columns) == {"userID", "itemID", "prediction"}
    assert preds["userID"].dtypes == rating_true["userID"].dtypes
    assert preds["itemID"].dtypes == rating_true["itemID"].dtypes
    user = rating_true.iloc[0]["userID"]
    item = rating_true.iloc[0]["itemID"]
    assert preds[(preds["userID"] == user) & (preds["itemID"] == item)][
        "prediction"
    ].values == pytest.approx(svd.predict(user, item).est, rel=TOL)

    preds = compute_rating_predictions(
        svd,
        rating_true.rename(columns={"userID": "uid", "itemID": "iid"}),
        usercol="uid",
        itemcol="iid",
        predcol="pred",
    )
    assert set(preds.columns) == {"uid", "iid", "pred"}
    assert preds["uid"].dtypes == rating_true["userID"].dtypes
    assert preds["iid"].dtypes == rating_true["itemID"].dtypes
    user = rating_true.iloc[1]["userID"]
    item = rating_true.iloc[1]["itemID"]
    assert preds[(preds["uid"] == user) & (preds["iid"] == item)][
        "pred"
    ].values == pytest.approx(svd.predict(user, item).est, rel=TOL)
def test_dial(s1hg, s1b, s1f):
    s1hg.move(4)
    s1hg.dial(0)
    assert s1hg.position() == pytest.approx(4)
    assert s1hg.dial() == pytest.approx(0)
    assert s1b.position() == pytest.approx(0)
    assert s1f.position() == pytest.approx(0)
def test_read_from_xtc(self):
    """Tests for read_from_xtc()"""
    topology = os.path.join(here, "test_data/barstar_md_traj.gro")
    traj = os.path.join(here, "test_data/barstar_md_traj.xtc")
    universe = MDAnalysis.Universe(topology, traj)
    selection = universe.select_atoms("backbone")

    # First timeframe
    atom = structure.Atom.read_from_xtc(selection[0])
    assert atom.resid == 1
    assert atom.name == "N"
    for a, b in zip(atom.coords, [21.68, 33.87, 36.18]):
        assert a == pytest.approx(b, abs=1e-3)
    atom = structure.Atom.read_from_xtc(selection[-1])
    assert atom.resid == 89
    assert atom.name == "C"
    for a, b in zip(atom.coords, [40.14, 38.75, 28.42]):
        assert a == pytest.approx(b, abs=1e-3)

    # Last timeframe
    ts = universe.trajectory[-1]
    atom = structure.Atom.read_from_xtc(selection[0])
    for a, b in zip(atom.coords, [20.63, 38.43, 32.09]):
        assert a == pytest.approx(b, abs=1e-3)
    atom = structure.Atom.read_from_xtc(selection[-1])
    for a, b in zip(atom.coords, [39.14, 39.77, 25.60]):
        assert a == pytest.approx(b, abs=1e-3)
def test_subside_point_load():
    params = dict(eet=65000.0, youngs=7e10)
    load = 1e9
    loc = (5000.0, 2500.0)

    x = np.arange(0, 10000, 1000.0)
    y = np.arange(0, 5000, 1000.0)
    (x, y) = np.meshgrid(x, y)
    x.shape = (x.size,)
    y.shape = (y.size,)

    dz_one_load = subside_point_load(load, loc, (x, y), params=params)

    n_loads = 16
    dz = subside_point_load(
        np.full(n_loads, load / n_loads),
        np.full((n_loads, 2), loc).T,
        (x, y),
        params=params,
    )

    assert dz.mean() == approx(dz_one_load.mean())
    assert dz.min() == approx(dz_one_load.min())
    assert dz.max() == approx(dz_one_load.max())
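# The flexure model is linear in the applied load, so sixteen collocated
# loads of load/16 should superpose to the same deflection field as the
# single point load; comparing mean, min and max of the two fields is a
# cheap proxy for full-field equality.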
def test_initial_routing(dans_grid3):
    """Test the action of fr.run_one_step() on the grid."""
    dans_grid3.fr.run_one_step()
    assert dans_grid3.mg.at_node["flow__receiver_node"] == approx(dans_grid3.r_old)
    assert dans_grid3.mg.at_node["drainage_area"] == approx(dans_grid3.A_old)
def test_jog(robz):
    robz.velocity(10)
    robz.jog(300)
    assert robz.velocity() == 300
    t = 1 + robz.acctime()
    start_time = time.time()
    time.sleep(t)
    elapsed_time = (time.time() - start_time) - robz.acctime()
    assert robz._hw_position() == pytest.approx(
        300 * elapsed_time + robz.acceleration() * 0.5 * robz.acctime() ** 2, 1e-2
    )
    assert robz.state() == "MOVING"
    robz.stop()
    assert robz.state() == "READY"
    assert robz._set_position() == robz.position()
    robz.dial(0)
    robz.position(0)
    assert robz.velocity() == 10
    robz.jog(-300, reset_position=0)
    assert robz.velocity() == 300
    start_time = time.time()
    time.sleep(t)
    elapsed_time = (time.time() - start_time) - robz.acctime()
    assert robz._hw_position() == pytest.approx(
        -300 * elapsed_time - robz.acceleration() * 0.5 * robz.acctime() ** 2, 1e-2
    )
    robz.stop()
    assert robz.dial() == 0
    assert robz.velocity() == 10
    robz.jog(300, reset_position=Modulo())
    time.sleep(t)
    robz.stop()
    assert robz.position() == pytest.approx(90, 0.1)
def test_shapExplainer(self, model):
    explainer = model.shapExplainer()
    assert explainer.expected_value[0] == pytest.approx(-0.22667938806360247)
def test_reasonable_defaults(self):
    # Whatever the defaults are, they should work for numbers close to 1
    # that have a small amount of floating-point error.
    assert 0.1 + 0.2 == approx(0.3)
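# 0.1 + 0.2 evaluates to 0.30000000000000004 in binary floating point; the
# default tolerance (a relative error of 1e-6 of the expected value) easily
# absorbs a discrepancy of ~5.6e-17.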
def test_nan_tolerance(self):
    illegal_kwargs = [dict(rel=nan), dict(abs=nan), dict(rel=nan, abs=nan)]
    for kwargs in illegal_kwargs:
        with pytest.raises(ValueError):
            1.1 == approx(1, **kwargs)
def test_opposite_sign(self):
    examples = [(eq, 1e-100, -1e-100), (ne, 1e100, -1e100)]
    for op, a, x in examples:
        assert op(a, approx(x))
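# 1e-100 and -1e-100 differ by only 2e-100, which falls inside approx's
# default absolute tolerance of 1e-12, so they compare equal despite the
# sign flip; 1e100 and -1e100 differ by 2e100, far outside any default
# tolerance, so they compare unequal.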
def test_can_get_atlas_center_in_shared_space(res, w, h, d):
    atlas = Atlas(volume=np.empty((w, h, d)), resolution_um=res)
    assert atlas.center == approx((w * res / 2, h * res / 2, d * res / 2))
def test_rgb_distance():
    assert (rgb_distance((0, 0, 0), (0, 255, 255)) ==
            pytest.approx(360.62445840513923744443))
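# The expected value is consistent with the Euclidean distance in RGB space:
# sqrt((255 - 0)**2 + (255 - 0)**2) = 255 * sqrt(2) = 360.6244584051392...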
def test_score(self, model):
    # accuracy does not take a pos_label
    assert model.score(cutoff=0.9, method="accuracy") == pytest.approx(1.0)
    assert model.score(cutoff=0.1, method="accuracy") == pytest.approx(1.0)

    # the remaining methods are checked for pos_label="Train" at both cutoffs
    expected = {
        "auc": 1.0,
        "best_cutoff": 0.999,
        "bm": 0.0,
        "csi": 0.0,
        "f1": 0.0,
        "logloss": 0.0,
        "mcc": 0.0,
        "mk": 0.0,
        "npv": 0.0,
        "prc_auc": 1.0,
        "precision": 0.0,
        "specificity": 1.0,
    }
    for method, value in expected.items():
        for cutoff in (0.9, 0.1):
            assert model.score(
                cutoff=cutoff, method=method, pos_label="Train"
            ) == pytest.approx(value)
def bf10(x):
    return approx(1 / x, rel=1e-5)
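# bf10 builds an approx-matcher for the reciprocal of x (to a relative
# tolerance of 1e-5), so a hypothetical usage would be:
#
#     assert 0.25 == bf10(4.0)   # i.e. 0.25 == approx(1 / 4.0)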
def test_rand(self):
    t_psi = Dense1D.rand(7, dtype='complex64')
    assert t_psi.shape == (2,) * 7
    assert t_psi.dtype == 'complex64'
    assert (t_psi.H @ t_psi) == pytest.approx(1.0)
def test_classification_report(self, model):
    cls_rep1 = model.classification_report().transpose()

    assert cls_rep1["auc"][0] == pytest.approx(1.0)
    assert cls_rep1["prc_auc"][0] == pytest.approx(1.0)
    assert cls_rep1["accuracy"][0] == pytest.approx(1.0)
    assert cls_rep1["log_loss"][0] == pytest.approx(0.0)
    assert cls_rep1["precision"][0] == pytest.approx(1.0)
    assert cls_rep1["recall"][0] == pytest.approx(1.0)
    assert cls_rep1["f1_score"][0] == pytest.approx(1.0)
    assert cls_rep1["mcc"][0] == pytest.approx(1.0)
    assert cls_rep1["informedness"][0] == pytest.approx(1.0)
    assert cls_rep1["markedness"][0] == pytest.approx(1.0)
    assert cls_rep1["csi"][0] == pytest.approx(1.0)
    assert cls_rep1["cutoff"][0] == pytest.approx(0.999)

    cls_rep2 = model.classification_report(cutoff=0.2).transpose()
    assert cls_rep2["cutoff"][0] == pytest.approx(0.2)
def test_slopes_at_patches(self):
    rmg = RasterModelGrid((4, 5))
    rmg.at_node["topographic__elevation"] = rmg.node_x.copy()
    slopes_out = rmg.calc_slope_at_node()
    assert np.all(slopes_out == approx(np.full(20, np.pi / 4.0, dtype=float)))
def test_envelope_rect(graph):
    # Without a clock signal, generate a single pulse at t = 0
    graph.sample_rate = 1000
    env = EnvelopeRect(0.1)
    graph.render_subgraph(env, reset=True)
    assert env.output_buffer[0][0] == pytest.approx(1.0)
    assert env.output_buffer[0][99] == pytest.approx(1.0)
    assert env.output_buffer[0][100] == pytest.approx(0.0)

    # Trigger 2x envelope pulses at t = 0.1, 0.2
    env = EnvelopeRect(0.01, clock=OneTapDelay(Impulse(10.0), 0.1))
    graph.render_subgraph(env, reset=True)
    assert env.output_buffer[0][0] == pytest.approx(0.0)
    assert env.output_buffer[0][99] == pytest.approx(0.0)
    assert env.output_buffer[0][100] == pytest.approx(1.0)
    assert env.output_buffer[0][109] == pytest.approx(1.0)
    assert env.output_buffer[0][110] == pytest.approx(0.0)
    assert env.output_buffer[0][199] == pytest.approx(0.0)
    assert env.output_buffer[0][200] == pytest.approx(1.0)
    assert env.output_buffer[0][209] == pytest.approx(1.0)
    assert env.output_buffer[0][210] == pytest.approx(0.0)
def test_changing_slopes(dans_grid3):
    """Test with the output from a successful run of fr.run_one_step."""
    # expected slopes, one row of values per grid row (7x7 grid)
    slope_old = np.array(
        [
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0,
            0.0, 2.0, 0.1, 0.0, 0.1, 2.0, 0.0,
            0.0, 2.0, 0.14142136, 0.1, 0.14142136, 2.0, 0.0,
            0.0, 2.0, 1.2, 1.0, 1.0, 2.0, 0.0,
            0.0, 1.06066017, 1.1, 1.06066017, 1.0, 1.0, 0.0,
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        ]
    )
    slope_new = np.array(
        [
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0,
            0.0, 2.0, 0.0, 0.0, 0.0, 2.0, 0.0,
            0.0, 2.0, 0.0, 0.0, 0.1, 2.0, 0.0,
            0.0, 2.0, 1.2, 1.0, 1.0, 2.0, 0.0,
            0.0, 1.06066017, 1.1, 1.06066017, 1.0, 1.0, 0.0,
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        ]
    )
    dans_grid3.fr.run_one_step()
    assert dans_grid3.mg.at_node["topographic__steepest_slope"] == approx(slope_old)

    dans_grid3.lf.map_depressions()
    assert dans_grid3.mg.at_node["topographic__steepest_slope"] == approx(slope_new)
def test_swap_gating(self):
    psi0 = MPS_rand_state(20, 5)
    CNOT = qu.controlled('not')
    psi0XX = psi0.gate(CNOT, (4, 13))
    psi0XX_s = psi0.gate_with_auto_swap(CNOT, (4, 13))
    assert psi0XX.H @ psi0XX_s == pytest.approx(1.0)
def test_composite_pits():
    """
    A test to ensure the component correctly handles cases where there are
    multiple pits, inset into each other.
    """
    mg = RasterModelGrid((10, 10))
    z = mg.add_field("topographic__elevation", mg.node_x.copy(), at="node")
    # a sloping plane
    # np.random.seed(seed=0)
    # z += np.random.rand(100)/10000.
    # punch one big hole
    z.reshape((10, 10))[3:8, 3:8] = 0.0
    # dig a couple of inset holes
    z[57] = -1.0
    z[44] = -2.0
    z[54] = -10.0

    # make an outlet
    z[71] = 0.9

    fr = FlowAccumulator(mg, flow_director="D8")
    lf = DepressionFinderAndRouter(mg)
    fr.run_one_step()
    lf.map_depressions()

    flow_sinks_target = np.zeros(100, dtype=bool)
    flow_sinks_target[mg.boundary_nodes] = True
    # no internal sinks now:
    assert_array_equal(mg.at_node["flow__sink_flag"], flow_sinks_target)

    # test conservation of mass:
    assert mg.at_node["drainage_area"].reshape((10, 10))[1:-1, 1].sum() == approx(
        8.0 ** 2
    )  # ^all the core nodes

    # test the actual flow field:
    # nA = np.array([ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
    #                 8.,  8.,  7.,  6.,  5.,  4.,  3.,  2.,  1.,  0.,
    #                 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  0.,
    #                 1.,  1.,  1.,  4.,  2.,  2.,  8.,  4.,  1.,  0.,
    #                 1.,  1.,  1.,  8.,  3., 15.,  3.,  2.,  1.,  0.,
    #                 1.,  1.,  1., 13., 25.,  6.,  3.,  2.,  1.,  0.,
    #                 1.,  1.,  1., 45.,  3.,  3.,  5.,  2.,  1.,  0.,
    #                50., 50., 49.,  3.,  2.,  2.,  2.,  4.,  1.,  0.,
    #                 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  0.,
    #                 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])
    nA = np.array(
        [
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            8.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0,
            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
            1.0, 1.0, 1.0, 4.0, 2.0, 2.0, 6.0, 4.0, 1.0, 0.0,
            1.0, 1.0, 1.0, 6.0, 3.0, 12.0, 3.0, 2.0, 1.0, 0.0,
            1.0, 1.0, 1.0, 8.0, 20.0, 4.0, 3.0, 2.0, 1.0, 0.0,
            1.0, 1.0, 1.0, 35.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0,
            50.0, 50.0, 49.0, 13.0, 10.0, 8.0, 6.0, 4.0, 1.0, 0.0,
            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        ]
    )
    assert_array_equal(mg.at_node["drainage_area"], nA)

    # the lake code map:
    lc = np.array(
        [
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
        ]
    )

    # test the remaining properties:
    assert lf.lake_outlets.size == 1
    assert lf.lake_outlets[0] == 72
    outlets_in_map = np.unique(lf.depression_outlet_map)
    assert outlets_in_map.size == 2
    assert outlets_in_map[1] == 72
    assert lf.number_of_lakes == 1
    assert lf.lake_codes[0] == 57
    assert_array_equal(lf.lake_map, lc)
    assert lf.lake_areas[0] == approx(25.0)
    assert lf.lake_volumes[0] == approx(63.0)
def test_works_seamlessly_for_hawki_package(self, capsys):
    cmd = scopesim.UserCommands(use_instrument="HAWKI")
    opt = scopesim.OpticalTrain(cmd)
    opt["detector_1024_window"].include = False
    opt["detector_array_list"].include = True
    opt.update()

    # test that the major values have been updated in rc.__currsys__
    assert rc.__currsys__["!TEL.area"].value == approx(52.02, rel=1e-3)
    assert rc.__currsys__["!TEL.etendue"].value == approx(0.58455, rel=1e-3)
    assert rc.__currsys__["!INST.pixel_scale"] == approx(0.106, rel=1e-3)

    # test that OpticalTrain builds properly
    assert isinstance(opt, scopesim.OpticalTrain)

    # test that we have a system throughput
    wave = np.linspace(0.7, 2.5, 181) * u.um
    tc = opt.optics_manager.surfaces_table.throughput
    ec = opt.optics_manager.surfaces_table.emission
    # ..todo:: something super weird is going on here when running pytest in the top directory
    # ..todo:: perhaps this has to be relaxed due to different filter
    # assert 0.5 < np.max(tc(wave)).value < 0.55
    assert 0.5 < np.max(tc(wave)).value < 0.8

    if PLOTS:
        plt.plot(wave, tc(wave))
        plt.show()
    if PLOTS:
        plt.plot(wave, ec(wave))
        plt.show()

    # test that we have the correct number of FOVs for Ks band
    # assert len(opt.fov_manager.fovs) == 18
    # Apparently this is 9 now?
    assert len(opt.fov_manager.fovs) == 9

    if PLOTS:
        fovs = opt.fov_manager.fovs
        from scopesim.optics.image_plane_utils import calc_footprint

        plt.subplot(121)
        for fov in fovs:
            x, y = calc_footprint(fov.hdu.header)
            plt.fill(x*3600, y*3600, alpha=0.1, c="b")
            plt.title("Sky plane")
            plt.xlabel("[arcsec]")

        plt.subplot(122)
        for fov in fovs:
            x, y = calc_footprint(fov.hdu.header, "D")
            plt.fill(x, y)
            plt.title("Detector focal plane")
            plt.xlabel("[mm]")

        plt.show()

    # test that the ImagePlane is large enough
    assert opt.image_planes[0].header["NAXIS1"] > 4200
    assert opt.image_planes[0].header["NAXIS2"] > 4200
    assert np.all(opt.image_planes[0].data == 0)

    # test that there are 4 detectors, each 2048x2048 pixels
    hdu = opt.readout()[0]
    assert len(opt.detector_arrays[0].detectors) == 4
    for detector in opt.detector_arrays[0].detectors:
        assert detector.hdu.header["NAXIS1"] == 2048
        assert detector.hdu.header["NAXIS2"] == 2048

    if PLOTS:
        for i in range(1, 5):
            plt.subplot(2, 2, i)
            plt.imshow(hdu[i].data)
        plt.show()

    dit = rc.__currsys__["!OBS.dit"]
    ndit = rc.__currsys__["!OBS.ndit"]
    assert np.average(hdu[1].data) == approx(ndit * dit * 0.1, abs=0.5)
def test_degenerate_drainage():
    """
    This "hourglass" configuration should be one of the hardest to correctly
    re-route.
    """
    mg = RasterModelGrid((9, 5))
    z_init = mg.node_x.copy() * 0.0001 + 1.0
    lake_pits = np.array([7, 11, 12, 13, 17, 27, 31, 32, 33, 37])
    z_init[lake_pits] = -1.0
    z_init[22] = 0.0  # the common spill pt for both lakes
    z_init[21] = 0.1  # an adverse bump in the spillway
    z_init[20] = -0.2  # the spillway
    mg.add_field("topographic__elevation", z_init, at="node")

    fr = FlowAccumulator(mg, flow_director="D8")
    lf = DepressionFinderAndRouter(mg)
    fr.run_one_step()
    lf.map_depressions()

    # correct_A = np.array([ 0.,  0.,  0.,  0.,  0.,
    #                        0.,  1.,  3.,  1.,  0.,
    #                        0.,  5.,  1.,  2.,  0.,
    #                        0.,  1., 10.,  1.,  0.,
    #                       21., 21.,  1.,  1.,  0.,
    #                        0.,  1.,  9.,  1.,  0.,
    #                        0.,  3.,  1.,  2.,  0.,
    #                        0.,  1.,  1.,  1.,  0.,
    #                        0.,  0.,  0.,  0.,  0.])
    correct_A = np.array(
        [
            0.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 1.0, 3.0, 1.0, 0.0,
            0.0, 2.0, 4.0, 2.0, 0.0,
            0.0, 1.0, 10.0, 1.0, 0.0,
            21.0, 21.0, 1.0, 1.0, 0.0,
            0.0, 1.0, 9.0, 1.0, 0.0,
            0.0, 2.0, 2.0, 2.0, 0.0,
            0.0, 1.0, 1.0, 1.0, 0.0,
            0.0, 0.0, 0.0, 0.0, 0.0,
        ]
    )

    assert mg.at_node["drainage_area"] == approx(correct_A)
def test_D8_D4_route(d4_grid):
    """Tests the functionality of D4 routing."""
    d4_grid.frD8.run_one_step()
    d4_grid.frD4.run_one_step()
    d4_grid.lfD8.map_depressions()
    d4_grid.lfD4.map_depressions()
    assert d4_grid.lfD8.number_of_lakes == 1
    assert d4_grid.lfD4.number_of_lakes == 3

    # flow_recD8 = np.array([ 0,  1,  2,  3,  4,  5,  6,  7, 16, 10, 16, 10, 18,
    #                        13, 14, 14, 15, 16, 10, 18, 20, 21, 16, 16, 16, 18,
    #                        33, 27, 28, 28, 24, 24, 24, 32, 34, 35, 35, 38, 32,
    #                        32, 32, 41, 42, 43, 44, 45, 46, 47, 48])
    flow_recD8 = np.array(
        [
            0, 1, 2, 3, 4, 5, 6,
            7, 16, 10, 16, 10, 18, 13,
            14, 14, 15, 16, 17, 18, 20,
            21, 16, 16, 16, 18, 33, 27,
            28, 28, 24, 24, 24, 32, 34,
            35, 35, 38, 32, 32, 32, 41,
            42, 43, 44, 45, 46, 47, 48,
        ]
    )
    # flow_recD4 = np.array([ 0,  1,  2,  3,  4,  5,  6,  7,  7, 10, 17, 10, 11,
    #                        13, 14, 14, 15, 16, 17, 18, 20, 21, 21, 16, 17, 18,
    #                        33, 27, 28, 28, 29, 24, 31, 32, 34, 35, 35, 36, 37,
    #                        32, 33, 41, 42, 43, 44, 45, 46, 47, 48])
    flow_recD4 = np.array(
        [
            0, 1, 2, 3, 4, 5, 6,
            7, 7, 10, 17, 10, 11, 13,
            14, 14, 15, 16, 17, 18, 20,
            21, 21, 16, 17, 18, 33, 27,
            28, 28, 29, 38, 31, 32, 34,
            35, 35, 36, 37, 32, 33, 41,
            42, 43, 44, 45, 46, 47, 48,
        ]
    )
    assert_array_equal(d4_grid.mg1.at_node["flow__receiver_node"], flow_recD8)
    assert_array_equal(d4_grid.mg2.at_node["flow__receiver_node"], flow_recD4)
    assert d4_grid.mg1.at_node["drainage_area"].reshape((7, 7))[:, 0].sum() == approx(
        d4_grid.mg2.at_node["drainage_area"].reshape((7, 7))[:, 0].sum()
    )
def test_scaleindex(self):
    dat = self.make_5d()
    assert dat.scale_to_index(2, 1) == pytest.approx(1)
    assert dat.scale_to_index(2, 3) == pytest.approx(1.5)
    assert dat.index_to_scale(2, 2) == pytest.approx(5)
    assert dat.index_to_scale(2, 2.5) == pytest.approx(7)
def test_three_pits():
    """
    A test to ensure the component correctly handles cases where there are
    multiple pits.
    """
    mg = RasterModelGrid((10, 10))
    z = mg.add_field("topographic__elevation", mg.node_x.copy(), at="node")
    # a sloping plane
    # np.random.seed(seed=0)
    # z += np.random.rand(100)/10000.
    # punch some holes
    z[33] = 1.0
    z[43] = 1.0
    z[37] = 4.0
    z[74:76] = 1.0
    fr = FlowAccumulator(mg, flow_director="D8")
    lf = DepressionFinderAndRouter(mg)
    fr.run_one_step()
    lf.map_depressions()

    flow_sinks_target = np.zeros(100, dtype=bool)
    flow_sinks_target[mg.boundary_nodes] = True
    # no internal sinks now:
    assert_array_equal(mg.at_node["flow__sink_flag"], flow_sinks_target)

    # test conservation of mass:
    assert mg.at_node["drainage_area"].reshape((10, 10))[1:-1, 1].sum() == approx(
        8.0 ** 2
    )  # ^all the core nodes

    # test the actual flow field:
    nA = np.array(
        [
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            8.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0,
            2.0, 2.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.0,
            26.0, 26.0, 25.0, 15.0, 11.0, 10.0, 9.0, 8.0, 1.0, 0.0,
            2.0, 2.0, 1.0, 9.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.0,
            2.0, 2.0, 1.0, 1.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0,
            2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 3.0, 2.0, 1.0, 0.0,
            20.0, 20.0, 19.0, 18.0, 17.0, 12.0, 3.0, 2.0, 1.0, 0.0,
            2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 3.0, 2.0, 1.0, 0.0,
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        ]
    )
    assert_array_equal(mg.at_node["drainage_area"], nA)

    # test a couple more properties:
    lc = np.empty(100, dtype=int)
    lc.fill(XX)
    lc[33] = 33
    lc[43] = 33
    lc[37] = 37
    lc[74:76] = 74
    assert_array_equal(lf.lake_map, lc)
    assert_array_equal(lf.lake_codes, [33, 37, 74])
    assert lf.number_of_lakes == 3
    assert lf.lake_areas == approx([2.0, 1.0, 2.0])
    assert lf.lake_volumes == approx([2.0, 2.0, 4.0])
def test_LMPR_complexity3(n):
    """Test that uniform Distributions have zero complexity."""
    d = Distribution.from_distribution(uniform(n))
    assert LMPR_complexity(d) == pytest.approx(0)
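# LMPR complexity is the product of normalized entropy and disequilibrium
# (the distance from the uniform distribution), so it vanishes whenever
# either factor does: uniform distributions have zero disequilibrium, while
# peaked distributions have zero entropy. The related tests below exercise
# both cases.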
def test_edge_draining():
    """
    This tests when the lake attempts to drain from an edge, where an issue
    is suspected.
    """
    # Create a 7x7 test grid with a well defined hole in it, AT THE EDGE.
    mg = RasterModelGrid((7, 7))

    z = mg.node_x.copy()
    guard_sides = np.concatenate((np.arange(7, 14), np.arange(35, 42)))
    edges = np.concatenate((np.arange(7), np.arange(42, 49)))
    hole_here = np.array(([15, 16, 22, 23, 29, 30]))
    z[guard_sides] = z[13]
    z[edges] = -2.0  # force flow outwards from the tops of the guards
    z[hole_here] = -1.0

    A_new = np.array(
        [
            0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
            0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
            15.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0,
            0.0, 10.0, 4.0, 3.0, 2.0, 1.0, 0.0,
            0.0, 1.0, 4.0, 3.0, 2.0, 1.0, 0.0,
            0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
            0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
        ]
    )

    depr_outlet_target = np.array(
        [
            XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, XX, XX, XX, XX,
            XX, 14, 14, XX, XX, XX, XX,
            XX, 14, 14, XX, XX, XX, XX,
            XX, 14, 14, XX, XX, XX, XX,
            XX, XX, XX, XX, XX, XX, XX,
            XX, XX, XX, XX, XX, XX, XX,
        ]
    )

    mg.add_field("topographic__elevation", z, at="node", units="-")

    fr = FlowAccumulator(mg, flow_director="D8")
    lf = DepressionFinderAndRouter(mg)

    fr.run_one_step()
    lf.map_depressions()
    assert mg.at_node["drainage_area"] == approx(A_new)
    assert lf.depression_outlet_map == approx(depr_outlet_target)
def test_disequilibrium4(n):
    """Test that uniform Distributions have zero disequilibrium."""
    d = Distribution.from_distribution(uniform(n))
    assert disequilibrium(d) == pytest.approx(0)
def test_lsvi_without_bonus():
    def lsvi_debug_gather_data(agent):
        """Gather data by sampling states and actions uniformly."""
        N = agent.n_episodes * agent.horizon
        count = 0
        while count < N:
            state = agent.env.observation_space.sample()
            action = agent.env.action_space.sample()
            next_state, reward, done, info = agent.env.sample(state, action)

            feat = agent.feature_map.map(state, action)
            outer_prod = np.outer(feat, feat)
            inv = agent.lambda_mat_inv

            agent.lambda_mat += np.outer(feat, feat)
            # update inverse (Sherman-Morrison rank-one update)
            agent.lambda_mat_inv -= (inv @ outer_prod @ inv) / (1 + feat @ inv.T @ feat)

            # update history
            agent.reward_hist[count] = reward
            agent.state_hist.append(state)
            agent.action_hist.append(action)
            agent.nstate_hist.append(next_state)

            tt = agent.total_time_steps
            agent.feat_hist[tt, :] = agent.feature_map.map(state, action)
            for aa in range(agent.env.action_space.n):
                agent.feat_ns_all_actions[tt, aa, :] = agent.feature_map.map(
                    next_state, aa
                )

            # increments
            agent.total_time_steps += 1
            count += 1

    env = GridWorld(nrows=2, ncols=2, walls=(), success_probability=0.95)
    env.reseed(123)

    def feature_map_fn(_env):
        return OneHotFeatureMap(_env.observation_space.n, _env.action_space.n)

    agent = LSVIUCBAgent(
        env, feature_map_fn=feature_map_fn, horizon=20, gamma=0.99, reg_factor=1e-5
    )
    agent.reseed(123)
    agent.n_episodes = 100
    agent.reset()

    lsvi_debug_gather_data(agent)

    # estimated Q
    S = env.observation_space.n
    Q_est = agent._run_lsvi(bonus_factor=0.0)[0, :].reshape((S, -1))

    # near optimal Q
    agent_opt = ValueIterationAgent(env, gamma=0.99, horizon=20)
    agent_opt.fit()
    Q = agent_opt.Q[0, :, :]

    print(Q)
    print("---")
    print(Q_est)
    print("-------")
    print(np.abs(Q - Q_est))

    # Check error
    assert Q_est == pytest.approx(Q, rel=0.01)
def test_LMPR_complexity4(n):
    """Test that peaked ScalarDistributions have zero complexity."""
    d = ScalarDistribution([1] + [0] * (n - 1))
    assert LMPR_complexity(d) == pytest.approx(0)
def test_LMPR_complexity2(n):
    """Test that uniform ScalarDistributions have zero complexity."""
    d = uniform(n)
    assert LMPR_complexity(d) == pytest.approx(0)
def test_market_in_focus(self):
    li = get_market_in_focus()
    assert len(li) == pytest.approx(10, 1)
def test_disequilibrium3(n):
    """Test that uniform ScalarDistributions have zero disequilibrium."""
    d = uniform(n)
    assert disequilibrium(d) == pytest.approx(0)
def test_market_iex_percent(self):
    li = get_market_iex_percent()
    assert len(li) == pytest.approx(10, 1)
def test_list_sector_performance(self):
    li = get_sector_performance()
    assert len(li) == pytest.approx(10, 1)