from Markov.markov import TransProb, MarkovChain, MarkovModel

print "\n=== Homework 6.1 ==="
OBSERVATIONS = (("A", "B", "C", "A", "B", "C"), ("A", "A", "B", "B", "C", "C"),
                ("A", "A", "A", "C", "C", "C"))
t = TransProb(OBSERVATIONS)
t.report()
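# Illustrative sanity check (a sketch, not the Markov module's actual implementation):
# the transition table TransProb presumably builds can be reproduced by counting
# consecutive state pairs in each observation sequence and normalizing per source state.
def ml_transition_estimates(sequences):
    counts = {}
    for seq in sequences:
        for src, dst in zip(seq, seq[1:]):
            counts.setdefault(src, {}).setdefault(dst, 0)
            counts[src][dst] += 1
    probs = {}
    for src, dsts in counts.items():
        total = float(sum(dsts.values()))
        probs[src] = dict((dst, n / total) for dst, n in dsts.items())
    return probs

print "ML transition estimates (sketch): %s" % ml_transition_estimates(OBSERVATIONS)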

print "\n=== Homework 6.2 ==="
CHAIN = ("A", 1.0, {True: 0.9, False: 0.5})
c = MarkovChain(CHAIN)
print "stationary distribution: P(A) = %.4f" % c.stationary_distribution(True)
print "stationary distribution: P(B) = %.4f" % c.stationary_distribution(False)

print "\n=== Homework 6.3 ==="
MODEL = ("A", 0.5, {True: 0.5, False: 0.5}, "X", {True: 0.1, False: 0.8})
m = MarkovModel(MODEL)
m.p({"A0": True}, {"X0": True})
m.p({"A1": True}, {"X0": True})
m.p({"A1": True}, {"X0": True, "X1": True})

print "\n=== Homework 6.11 ==="
from Game.game import Game
HW6_11 = {
    "players": (("B", "d", "e", "f"), ("A", "a", "b", "c")),
    "matrix": (
        ((3, 3), (5, 0), (2, 1)),
        ((2, 4), (7, 8), (4, 6)),
        ((7, 5), (8, 5), (5, 3)),
    )
}
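# Independent sketch that does not use the Game class: enumerate pure-strategy Nash
# equilibria of the HW6_11 matrix. It assumes rows are B's strategies (d, e, f),
# columns are A's strategies (a, b, c), and each cell is (row payoff, column payoff);
# those orderings are assumptions, not something the Game module guarantees.
def pure_nash(matrix, row_names, col_names):
    cells = []
    for i, row in enumerate(matrix):
        for j, (r_pay, c_pay) in enumerate(row):
            best_row = all(r_pay >= matrix[k][j][0] for k in range(len(matrix)))
            best_col = all(c_pay >= matrix[i][l][1] for l in range(len(row)))
            if best_row and best_col:
                cells.append((row_names[i], col_names[j]))
    return cells

print "pure Nash equilibria (sketch): %s" % pure_nash(HW6_11["matrix"], ("d", "e", "f"), ("a", "b", "c"))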
Example #2
from Markov.markov import MarkovChain, TransProb, MarkovModel

print "\nWeather Example"
WEATHER_CHAIN = ("R", 1.0, {True: 0.6, False: 0.2})
c = MarkovChain(WEATHER_CHAIN, 3)
c.p({"R1":True})
c.p({"R2":True})
c.p({"R3":True})
print "stationary distribution: P(R) = %.4f" % c.stationary_distribution()

print "\nA Example"
A_CHAIN = ("A", 1.0, {True: 0.5, False: 1.0})
c = MarkovChain(A_CHAIN, 3)
c.p({"A1":True})
c.p({"A2":True})
c.p({"A3":True})
print "stationary distribution: P(A) = %.4f" % c.stationary_distribution()

print "\nTransition Probabilities 1"
TRANSITIONS = [("R","S","S","S","R","S","R")]
t = TransProb(TRANSITIONS)
t.report()

print "\nTransition Probabilities 2"
TRANSITIONS = [("S","S","S","S","S","R","S","S","S","R","R")]
t = TransProb(TRANSITIONS)
t.report()

print "\nTransition Probabilities 3"
TRANSITIONS = [("R","S","S","S","S")]
t = TransProb(TRANSITIONS)
t.report()