def test_portfolio_weights_get_updated_by_predictions_up_to_one_after_the_batch(memory):
    """Predictions pushed back via update() must become the stored weights
    that a later, overlapping batch selection observes."""
    record(memory, 4)
    selected = get_stable_batch(memory, 2, 1)
    selected.predictions = [[0.0, 0.0]] * 2
    memory.update(selected)
    # Re-select and verify the updated weights were persisted.
    reread = get_stable_batch(memory, 2, 7)
    assert_weights([[0.0], [0.0]], reread.weights)
def test_portfolio_weights_of_a_batch_can_be_updated_with_predictions(memory):
    """A batch's predictions overwrite the stored weights of all but the
    first record of the batch, at any position in history."""
    # The behaviour should be stable in a random environment, so pick a
    # random (time-based) seed for the batch selection.
    seed = int(time.time())
    record(memory, 100)
    chosen = get_stable_batch(memory, 3, seed)
    previous_weights = list(chosen.weights)
    chosen.predictions = [[0.0, 0.0]] * 3
    memory.update(chosen)
    chosen = get_stable_batch(memory, 3, seed)
    # First record keeps its old weight; the trailing two take the predictions.
    assert_weights([previous_weights[0], [0.0], [0.0]], chosen.weights)
def test_fpm_memory_restores_with_correctly_when_exceeding_capacity(
        save_file, default):
    """Saving a memory that has already evicted old records and restoring it
    must yield the same visible records."""
    cfg = default.window(1).size(2).cfg
    original = make_memory(cfg)
    record(original, 3)  # one record beyond the configured size of 2
    original.save(save_file)
    assert_batch(batch(2.0, 3.0), get_stable_batch(original, 1, 11))
    # A fresh memory restored from file must agree with the saved one.
    restored = make_memory(cfg)
    restored.restore(save_file)
    assert_batch(batch(2.0, 3.0), get_stable_batch(restored, 1, 11))
def test_batch_selection_follows_a_geometrically_decaying_distribution(memory):
    """With beta = 0.5, random batch starting points should be drawn from a
    geometrically decaying distribution over the recorded history."""
    np.random.seed(7)  # fixed seed so the sampled frequencies are reproducible
    memory.beta = 0.5
    records = 6
    record(memory, records)
    trials = 1000
    counts = [0] * (records - 2)
    for _ in range(trials):
        state = identify_state(memory.get_random_batch(2)[0])
        counts[state - 1] += 1
    observed = [c / trials for c in counts]
    # Newest states are chosen most often: 1/8, 1/8, 1/4, 1/2.
    assert pytest.approx([0.125, 0.125, 0.25, 0.5], 0.1) == observed
def test_drop_history_when_memory_capacity_is_reached(memory):
    """Once capacity is exceeded, the oldest record is evicted and only the
    newest `capacity` records remain selectable."""
    memory.capacity = 2
    record(memory, 3)
    # Only the two most recent records (2.0 and 3.0) should survive.
    assert_batch(batch(2.0, 3.0), get_stable_batch(memory, 1, 11))
def test_portfolio_weight_update_is_clamped_to_record_size(memory):
    """Updating with a batch that spans the entire record set must be
    clamped safely instead of writing past the end of the history."""
    record(memory, 2)
    full_span = get_stable_batch(memory, 2, 1)
    full_span.predictions = [[0.0, 0.0]] * 2
    # No explicit assert: completing without an exception is the test.
    memory.update(full_span)