Example #1
# Imports for the solvers, objective, and strategy classes used below
# (module paths as used elsewhere in these examples; fimdp.core for
# CounterStrategy is an assumption).
from fimdp.core import CounterStrategy
from fimdp.energy_solvers import BasicES, GoalLeaningES
from fimdp.objectives import BUCHI


def create_counterstrategy(consmdp,
                           capacity,
                           targets,
                           init_state,
                           energy=None,
                           solver=GoalLeaningES,
                           objective=BUCHI,
                           threshold=0.1):
    """
    Create counter strategy for given parameters and the current consMDP object
    and return the strategy
    """

    if energy is None:
        energy = capacity
    if solver == GoalLeaningES:
        slvr = GoalLeaningES(consmdp, capacity, targets, threshold=threshold)
    elif solver == BasicES:
        slvr = BasicES(consmdp, capacity, targets)
    else:
        raise ValueError(f"Unsupported solver class: {solver}")
    selector = slvr.get_selector(objective)
    strategy = CounterStrategy(consmdp,
                               selector,
                               capacity,
                               energy,
                               init_state=init_state)
    return strategy
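
# A minimal usage sketch (illustrative, not from the original file): build a
# goal-leaning counter strategy for the `ultimate()` example used elsewhere in
# these snippets (a local `reachability_examples` helper). Per those snippets,
# capacity 15 suffices for the Büchi objective there and state 0 needs an
# initial load of 6, so starting with a full battery of 15 is safe.
from reachability_examples import ultimate

mdp, targets = ultimate()
strategy = create_counterstrategy(mdp,
                                  capacity=15,
                                  targets=targets,
                                  init_state=0)
print(strategy)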
Example #2
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# # Test computation of minimal initial loads for Büchi objective

from math import inf
from reachability_examples import ultimate, little_alsure
from fimdp.energy_solvers import BasicES
from fimdp.objectives import BUCHI

# ## Test case 1
# Simple computation of Büchi values on a non-trivial example where we need to go through reload states to obtain the optimal values.

# +
m, targets = ultimate()

solver = BasicES(m, 15, targets)
result = solver.get_min_levels(BUCHI)
expected = [6, inf, inf, 3, 0, 1, 10, inf, 4, inf, inf]
solver
# -

assert result == expected, ("get_min_levels(BUCHI) returns" +
                            " wrong values:\n" +
                            f"  expected: {expected}\n  returns:  {result}\n")
print("Passed test 1 for get_min_levels(BUCHI) in test_buchi file.")

# ## Test case 2: insufficient capacity

# The same MDP, now with capacity 14. No initial load is enough.

solver.cap = 14
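
# A sketch of the expected continuation (an assumption: `get_min_levels`
# accepts `recompute=True`, as `get_selector` does elsewhere in these
# examples): with capacity 14, no finite initial load is enough, so every
# state should get the value infinity, matching the values computed for the
# same MDP with capacity 14 in a later snippet.

# +
result = solver.get_min_levels(BUCHI, recompute=True)
expected = [inf] * 11
assert result == expected, ("get_min_levels(BUCHI) returns" +
                            " wrong values:\n" +
                            f"  expected: {expected}\n  returns:  {result}\n")
print("Passed test 2 for get_min_levels(BUCHI) in test_buchi file.")
# -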
Example #3
# 1. Test initialization (with an iterable, nothing)
# 2. Test update (correct and incorrect actions)
# 3. Test `select_action`
# 4. Test `copy_values_from`

from fimdp.core import CounterSelector
from fimdp.energy_solvers import BasicES
from fimdp.objectives import AS_REACH

# ### 1. Test initialization

selector = CounterSelector(m)
assert len(selector) == m.num_states

# Initialize with a list of dicts

# +
solver = BasicES(m, cap=1000, targets=T)
s = list(solver.get_selector(AS_REACH))

selector = CounterSelector(m, s)
assert selector == s
selector
# -

print("Passed test 0 for CounterSelector in file test_strategy.py")

# ### 2. Update CounterSelector
# Updating the selector should assign the correct tuples to the correct underlying dicts. We will use some of the following actions:

selector = CounterSelector(m)
m.actions
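
# A small sketch of the update test (illustrative values; assuming
# `CounterSelector.update(state, energy, action)` and
# `CounterSelector.select_action(state, energy)`, the interface exercised by
# `CounterStrategy`): record an action of state 0 for energy level 5 and read
# it back for the same energy.

# +
action = next(iter(m.actions_for_state(0)))  # pick any action available in state 0
selector.update(0, 5, action)
assert selector.select_action(0, 5) == action
# -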
Example #4
threshold_class = lambda mdp, cap, t: GoalLeaningES(mdp, cap, t, threshold=0.1)
showcase_solver(threshold_class, capacity=35)

problematic = 187
strategy_at(threshold_class, problematic, capacity=35)

# This strategy uses the strong actions (`4`–`7`) for energy between 16 and 27. It goes `NORTH=4` only in the interval 16–18, prefers to go `SOUTH=6` with energy in 19–28, and finally uses the weak action to `EAST` with more than 27 units of energy.

# ## Equivalent values

# In this section, we show that the new solvers actually improve on the Basic solver while maintaining the same minimal energy levels needed to fulfill the objectives. These values can be obtained by calling `get_min_levels(BUCHI)` on the solvers.

from fimdp.objectives import BUCHI

m, t = e.get_consmdp()
basic = BasicES(m, cap=35, targets=t)
goal = GoalLeaningES(m, cap=35, targets=t)
threshold = GoalLeaningES(m, cap=35, targets=t, threshold=0.1)

assert basic.get_min_levels(BUCHI) == goal.get_min_levels(BUCHI), (
    "The basic and the goal-leaning strategies do not reach the same "
    "values of initial load for the same task.")
print(
    "Passed test 1 for values of goal-leaning strategies in file tut/Solvers.ipynb"
)
assert basic.get_min_levels(BUCHI) == threshold.get_min_levels(BUCHI), (
    "The basic and the threshold strategies do not reach the same "
    "values of initial load for the same task.")
print(
    "Passed test 2 for values of goal-leaning strategies in file tut/Solvers.ipynb"
)
Example #5
lmdp.state_labels = [set(), {0}, {1}, set()]

# Create a deterministic automaton for the desired formula

f = spot.formula("GF s1 & GF s2")
aut = spot.translate(f, "BA", "deterministic", "complete")
assert aut.is_deterministic()
aut

# Build the product. It should contain states named in the form `mdp_state,automaton_state`.

p, T = lmdp.product_with_dba(aut)
assert p.names == ['0,1', '1,0', '2,1', '3,1', '3,0', '0,0', '2,2']
print("Passed test 1 for product in file test_product.py")

psolver = BasicES(p, 5, T)
res = psolver.get_min_levels(BUCHI)
assert res == [inf, inf, inf, inf, inf, inf, inf]
print("Passed test 2 for product in file test_product.py")

# +
psolver.cap = 9
res = psolver.get_selector(BUCHI, recompute=True)

result = []
for rule in res:
    result.append({k: v.label for k, v in rule.items()})
assert result == [{
    6: 'α',
    2: 'β'
}, {
Example #6
        prev_o = curr_o
        prev_e = curr_e
        
    m.add_action(prev_o, {0: 1}, "p", 1)
    m.add_action(prev_e, {1: 1}, "p", 1)
    
    return m

fimdp.dot.dotpr = "neato"

""
cap = 32 # We have cap/2 reload states, cap/4 in each flower
path = 6
m = consMDP_double_flower(cap, path)

solver = BasicES(m, cap=cap + 2, targets=[2])
result = solver.get_min_levels(POS_REACH)
expected = [3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3]
solver

""
assert result == expected, ("get_positive_reachability() returns" +
    " wrong values:\n" +
    f"  expected: {expected}\n  returns:  {result}\n")
print("Passed test 1 for get_positive_reachability() in test_reachability file.")

""
solver = BasicES(m, cap=cap, targets=[2])
result = solver.get_min_levels(POS_REACH)
expected = [31, 30, 0, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, 32, 31, 32, 31, 32, 31, 32, 31, 32, 31, 32, 31]
solver
Example #7
    selector.select_action(0, 1, 12)
    assert False
except KeyError:
    print("Passed test 3 for ProductSelector")

action = selector.select_action(0, 0, 12)
assert action in lmdp.actions_for_state(0)
print("Passed test 4 for ProductSelector")

# ## Equivalence of ProductSelectorWrapper to ProductSelector

from fimdp.core import ProductSelector, ProductSelectorWrapper  # ProductSelector assumed to live in fimdp.core as well

# Create a ProductSelector and initialize it using the CounterSelector computed for the product

psolver = BasicES(product, 9, T)
p_selector = psolver.get_selector(BUCHI)
selector = ProductSelector(product)
for state, rule in enumerate(p_selector):
    for energy, action in rule.items():
        selector.update(state, energy, action)

wrapper = ProductSelectorWrapper(product, p_selector)

for p_s in range(product.num_states):
    orig, other = product.components[p_s]
    for energy in wrapper[p_s]:
        w_a = wrapper.select_action(orig, other, energy)
        s_a = selector.select_action(orig, other, energy)
        assert s_a == w_a
print("Passed test 5 for ProductSelector")
Example #8
m.add_action(2, {4: 1}, "a", 2)
m.add_action(12, {3: 1}, "a", 1)
m.add_action(3, {3: 0.5, 4: 0.5}, "a", 1)
m.add_action(4, {1: 1}, "a", 0)
m.add_action(7, {3: 1}, "a", 1)
m.add_action(7, {6: 1}, "b", 1)
m.add_action(6, {4: 0.5, 5: 0.5}, "a", 5)
m.add_action(5, {1: 1}, "a", 6)
m.add_action(8, {9: 1}, "a", 1)
m.add_action(8, {1: 1}, "b", 3)
m.add_action(10, {1: 0.5, 11: 0.5}, "a", 2)
m.add_action(0, {0: 1}, "r", 0)
m.add_action(9, {9: 1}, "r", 0)
m.add_action(11, {11: 1}, "a", 1)

MI = BasicES(m, inf, None)
# -

result   = MI.get_min_levels(MIN_INIT_CONS)
expected = [0, 3, 2, 1, 3, 9, 14, 1, 1, 0, 5, 1, 1]
MI

assert result == expected, ("BasicES.get_min_levels(MIN_INIT_CONS) returns" +
    " wrong values:\n" +
    f"  expected: {expected}\n  returns:  {result}\n")
print("Passed test 1 for BasicES.get_min_levels(MIN_INIT_CONS) in test_safety file.")

# If state 11 is not a reload state, we cannot surely reach a reload state from state 10.

# +
m.unset_reload(11)
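# A sketch of the expected effect (again assuming `get_min_levels` accepts
# `recompute=True`): without the reload in state 11, state 10 can no longer
# surely reach a reload state, so its minimal initial consumption becomes
# infinite.
result = MI.get_min_levels(MIN_INIT_CONS, recompute=True)
assert result[10] == inf, (
    f"State 10 should need an infinite initial load, got {result[10]}.")
# -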
Example #9
m.add_action(3, {6: 1}, "a", 3)

m.add_action(4, {5: 1}, "", 1)
m.add_action(5, {4: 1}, "r", 1)
m.add_action(5, {3: 1}, "t", 1)

m.add_action(6, {7: .5, 10: .5}, "a", 3)
m.add_action(6, {3: .5, 8: .5}, "B", 6)

m.add_action(7, {9: 1}, "", 1)
m.add_action(9, {9: 1}, "", 1)
m.add_action(10, {9: 1}, "", 1)

m.add_action(8, {5: 1}, "r", 3)

solver = BasicES(m, 15, T)
result = solver.get_min_levels(BUCHI)
expected = [6, inf, inf, 3, 0, 1, 10, inf, 4, inf, inf]

assert result == expected, ("get_min_levels(BUCHI) returns" +
                            " wrong values:\n" +
                            f"  expected: {expected}\n  returns:  {result}\n")

# With cap < 15, we get all infs
solver = BasicES(m, 14, T)
result = solver.get_min_levels(BUCHI)
expected = [inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf]

assert result == expected, ("get_min_levels(BUCHI) returns" +
                            " wrong values:\n" +
                            f"  expected: {expected}\n  returns:  {result}\n")
    "size_y": "size_x",
    "capacity": 60,
}
mdp = prism_to_consmdp("gw_param.prism", constants=constants)
targets = [
    2789, 2768, 2915, 2786, 2566, 2302, 3408, 2260, 2641, 2176, 2695, 2881,
    2893, 3414, 2361, 1835, 2199, 2267, 2967, 2983, 2672, 2977, 2176, 2367,
    3326, 3448, 2903, 2549, 2659, 3408, 2776, 2385, 3185, 3421, 2195, 3292,
    2109, 2379, 3018, 3224, 2559, 3112, 2956, 2448, 2294, 2790, 2464, 3291,
    2294, 2546, 2200, 625, 676, 729, 2547, 2386, 2696, 2177, 2660, 2295, 2110
]
for target in [2110]:
    print("========construction on randomly chosen target========")
    cap = 60
    targets = [target]
    solver = BasicES(mdp, cap, targets)

    SF = solver.get_min_levels(SAFE)
    SFR = solver.get_min_levels(POS_REACH)
    SFRR = solver.get_min_levels(BUCHI)

    formula = 'Pmax=? [F "target" ]'
    prop = stormpy.parse_properties(formula)

    bisimulation_mdp, nstorm_result, n_strategy = MRP_by_naive(
        mdp, targets, cap, SF)

    smart_mdp, storm_result, p_strategy = MRP_by_pruned(
        mdp, targets, cap, SF, SFR, SFRR)

    smarter_mdp, smarter_storm_result, q_strategy = MRP_by_quotient(
Example #11
from fimdp.io import prism_to_consmdp, parse_cap_from_prism, \
    consmdp_to_storm_consmdp, storm_sparsemdp_to_consmdp, \
    encode_to_stormpy
from fimdp.energy_solvers import BasicES
from fimdp.objectives import BUCHI, AS_REACH
from fimdp.examples.cons_mdp import little_alsure
from fimdp.explicit import product_energy
import stormpy

mdp = prism_to_consmdp("prism_models/gw_50_full.prism")
assert mdp.num_states == 2500, ("Wrong number of states: "
                                f"{mdp.num_states} instead of 2500.")
solver = BasicES(mdp, cap=180, targets=[620])
solver.get_min_levels(BUCHI)

expected = "0: x=0 y=0"
result = mdp.names[0]
assert result == expected, ("Wrong state name, should be: "
                            f"`{expected}` and is `{result}`.")
print("Passed test 1 for prism_to_consmdp.")

try:
    prism_to_consmdp("prism_models/gw_50_norew.prism")
    assert False, "Detection of missing consumption failed"
except ValueError as e:
    print("Passed test 2 for prism_to_consmdp. [no consumption]")

try:
    prism_to_consmdp("prism_models/gw_50_norel.prism")
    assert False, "Detection of missing reloads failed"
except ValueError as e:
Example #12
# ### Simple goal-leaning example

# Consider the following example ConsMDP. In state 0, both actions lead to state 1 with some probability and otherwise stay in 0, and they do so with the same consumption. In other words, they are equally good for reaching the green state if we ignore the transition probabilities.

from fimdp.examples.cons_mdp import goal_leaning
gl, T = goal_leaning()
gl.show(targets=T)

# The basic solver completely ignores the transition probabilities and chooses **either** of the two actions; in fact, it chooses the one that was added first when the ConsMDP was created. In our case, that is the action `top`. The goal-leaning solver chooses `bottom`, which has a higher probability of moving on.

# +
from fimdp.energy_solvers import BasicES, GoalLeaningES
from fimdp.objectives import BUCHI

basic = BasicES(gl, 10, targets=T)
goal = GoalLeaningES(gl, 10, targets=T)
print(f"Selection rule for state 0 given by the basic solver:", basic.get_selector(BUCHI)[0])
print(f"Selection rule for state 0 given by the goal-leaning solver:", goal.get_selector(BUCHI)[0])
# -

# What is the change? When choosing among equally good actions, the goal-leaning solver picks the action that is most likely to succeed.
#
# #### More technical explanation
# The measure of *goodness* in the sentence above is a low value of `action_value_T`, which is the least amount of energy needed to satisfy the objective by this action. `action_value_T` corresponds to $\mathit{SPR-Val}$ in the CAV paper. Intuitively, this value is sufficient to play the action, always survive, and continue towards the targets if we are lucky and the outcome of the action is the desired one. In contrast with the Basic solver, `action_value_T` returns not only the action value but also the probability that the outcome of the action will be the one that produced this value. Among the actions with minimal value, we then choose the one with the highest probability of reaching the desired outcome.
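
# As a standalone illustration (not fimdp code) of the selection rule just
# described: among hypothetical (action, value, probability) triples, first
# keep the actions with the minimal value, then pick the one with the highest
# probability of the desired outcome.

# +
candidates = [("top", 2, 0.01), ("bottom", 2, 0.99), ("detour", 3, 1.0)]
best_value = min(value for _, value, _ in candidates)
best = max((c for c in candidates if c[1] == best_value), key=lambda c: c[2])
assert best[0] == "bottom"
# -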

result = goal.get_selector(BUCHI)[0][0].label
expected = 'bottom'
assert result == expected, (
    f"The goal-leaning strategy should prefer the action `{expected}` " +
    f"in state 0. It chooses `{result}` instead."
Example #13
# # Test computation of minimal capacity

from reachability_examples import ultimate
from fimdp.energy_solvers import BasicES
from fimdp.objectives import AS_REACH, BUCHI
from fimdp.mincap_solvers import bin_search

# In the following example, the minimal capacity for the Büchi objective defined by the blue states is 15.

m, T = ultimate()
solver15 = BasicES(m, 15, T)
solver15.get_min_levels(BUCHI)
solver15

solver14 = BasicES(m, 14, T)
solver14.get_min_levels(BUCHI)
solver14

# +
result = bin_search(m, 0, T)
expected = 15

assert result == expected, (
    f"The minimal capacity should be {expected}, not {result}.")
print("Passed test 1 for bin_search() in test_mincap.py file.")
# -

# ### Capacity too small
# If the starting capacity is not enough, an exception should be raised.

try: