Code Example #1
File: demo4.py Project: spouros/StochasticsLabPublic
def calculateProbabilities(markov_table, init_dist):
    # Estimate Pr[X_40 = 1] by Monte Carlo: run the chain for 40 steps
    # many times and count how often it ends up in state 1.
    mc = markov_chain(markov_table, init_dist)
    experiments = 500000  # number of independent runs
    steps = 40            # target time
    visits = 0            # occurrences of the event {X_40 = 1}

    for index in range(experiments):
        mc.start()  # draw a fresh initial state
        for j in range(steps):
            mc.move()
        if mc.running_state == 1:
            visits += 1

    probability = visits / experiments
    return probability
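
All of these snippets assume a markov_chain class defined elsewhere in each project. Only its interface is visible here: a constructor taking a transition table and an initial distribution, plus start(), move(), running_state and steps. The internals below are a minimal sketch of the dict-based form used in Examples #1, #2, #6 and #7, an assumption rather than the original library:

import random

class markov_chain:
    # Minimal sketch: markov_table maps each state to a dict of
    # {next_state: probability}; init_dist maps initial states to probabilities.
    def __init__(self, markov_table, init_dist):
        self.markov_table = markov_table
        self.init_dist = init_dist
        self.running_state = None
        self.steps = 0

    def _draw(self, dist):
        # Sample one state from a {state: probability} dict.
        states = list(dist)
        weights = list(dist.values())
        return random.choices(states, weights=weights)[0]

    def start(self):
        # Draw a fresh initial state and reset the step counter.
        self.running_state = self._draw(self.init_dist)
        self.steps = 0

    def move(self):
        # Take one step according to the current state's transition row.
        self.running_state = self._draw(self.markov_table[self.running_state])
        self.steps += 1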
Code Example #2
File: lab3.py Project: KottonP/Projects
def mc_est(n: int) -> float:
    # Estimate the expected number of steps until absorption, using n runs.
    markov_table = {  # Transition Table
        0: {
            1: .5,
            2: .5
        },  # from state 0 we move to state 1 with prob 0.5 and to state 2 with 0.5
        1: {
            0: 1 / 3,
            3: 2 / 3
        },
        2: {
            2: 1.
        },
        3: {
            0: .5,
            3: .25,
            4: .25
        },
        4: {
            4: 1.
        }
    }

    # Initial Distribution
    init_dist = {0: 1.}  # we start from state 0 with probability 1

    mc = markov_chain(markov_table, init_dist)

    sample_size = n  # number of samples (N)
    running_total = 0

    for i in range(sample_size):
        mc.start()
        while mc.running_state not in (2, 4):  # states 2 and 4 are absorbing
            mc.move()
        running_total += mc.steps  # steps it took to be absorbed

    mc_estimate = running_total / sample_size
    return mc_estimate
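
For this small chain, the quantity mc_est estimates also has an exact value: the vector t of expected absorption times satisfies (I - Q) t = 1, where Q is the transition matrix restricted to the transient states {0, 1, 3}. A quick numpy cross-check (the matrix below is just the table above with the absorbing rows and columns removed); the exact answer from state 0 is 35/11 ≈ 3.18:

import numpy as np

# Transient states in order [0, 1, 3]; states 2 and 4 are absorbing.
Q = np.array([
    [0.0, 0.5, 0.0],    # from 0: to 1 w.p. 1/2 (to absorbing 2 w.p. 1/2)
    [1/3, 0.0, 2/3],    # from 1: to 0 w.p. 1/3, to 3 w.p. 2/3
    [0.5, 0.0, 0.25],   # from 3: to 0 w.p. 1/2, stay w.p. 1/4 (to 4 w.p. 1/4)
])
t = np.linalg.solve(np.eye(3) - Q, np.ones(3))
print(t[0])  # expected absorption time from state 0: 35/11 ≈ 3.1818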
Code Example #3
        States[8]: {States[11]: p, States[12]: q},
        States[9]: {States[12]: p, States[16]: q},
        States[10]: {States[13]: p, States[14]: q},
        States[11]: {States[14]: p, States[15]: q},
        States[12]: {States[15]: p, States[16]: q},
        States[13]: {States[13]: 1.},
        States[14]: {States[13]: p, States[11]: q},
        States[15]: {States[11]: p, States[16]: q},
        States[16]: {States[16]: 1.}
    }


    init_probs = {States[0]: 1.0}
    # Ok... we are ready now
    # Let's construct a Markov Chain. So let's call the constructor
    mc = markov_chain(markov_table, init_probs)
    ## Experiment parameters
    N = 20000    # number of samples
    steps = 0    # total number of steps across all runs
    counter = 0  # times the chain is absorbed at States[13]

    ## Simulation
    for i in range(N):
        mc.start()  # new experiment
        while mc.running_state not in [States[13], States[16]]:
            mc.move()
            steps += 1
        if mc.running_state == States[13]:
            counter += 1
Code Example #4
    markov_game = {
        '0': [('1', p), ('2', 1 - p)],
        '1': [('1', p), ('2', 1 - p)],
        '2': [('1', p), ('2', 1 - p)]
    }
    markov_tie = {
        '0': [('1', p), ('2', 1 - p)],
        '1': [('3', p), ('0', 1 - p)],
        '2': [('0', p), ('4', 1 - p)],
        '3': [('3', 1)],
        '4': [('4', 1)]
    }

    # Ok... we are ready now
    # Let's construct a Markov Chain. So let's call the constructor
    # Experiment parameters
    N = 100000
    counter = 0
    for i in range(N):
        m1 = lib.markov_chain(init_probs, markov_game)
        m1.start()
        res = game(m1)
        if res == 0:
            m2 = lib.markov_chain(init_probs, markov_tie)
            m2.start()
            res = tie(m2)
        if res == 1:
            counter += 1
    print("So we estimate the Pr to win by", counter / N)
Code Example #5
              ('4', 2 / 11.0), ('5', 2 / 11.0), ('6', 2 / 11.0)],
        '2': [('1', 2 / 11.0), ('2', 1 / 11.0), ('3', 2 / 11.0),
              ('4', 2 / 11.0), ('5', 2 / 11.0), ('6', 2 / 11.0)],
        '3': [('1', 2 / 11.0), ('2', 2 / 11.0), ('3', 1 / 11.0),
              ('4', 2 / 11.0), ('5', 2 / 11.0), ('6', 2 / 11.0)],
        '4': [('1', 2 / 11.0), ('2', 2 / 11.0), ('3', 2 / 11.0),
              ('4', 1 / 11.0), ('5', 2 / 11.0), ('6', 2 / 11.0)],
        '5': [('1', 2 / 11.0), ('2', 2 / 11.0), ('3', 2 / 11.0),
              ('4', 2 / 11.0), ('5', 1 / 11.0), ('6', 2 / 11.0)],
        '6': [('1', 2 / 11.0), ('2', 2 / 11.0), ('3', 2 / 11.0),
              ('4', 2 / 11.0), ('5', 2 / 11.0), ('6', 1 / 11.0)],
    }

    # Ok... we are ready now
    # Let's construct a Markov Chain. So let's call the constructor
    m = lib.markov_chain(init_probs, markov_table)

    # Experiment parameters
    N = 100000
    steps = int(input())  # number of steps, read from stdin
    counter = 0
    for i in range(N):
        # Let's initiate the running state for t=0.
        # We must do this every time we want to restart the Chain
        m.start()
        # Let the Markov chain move for `steps` steps
        for j in range(steps):
            m.move()
        # and check if we end up in one of the goal_states
        if m.running_state == '1':
            counter += 1
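
A useful check on this experiment: every column of the transition table also sums to 1/11 + 5 * 2/11 = 1, so the matrix is doubly stochastic and its stationary distribution is uniform. counter / N should therefore approach 1/6 once steps is moderately large, regardless of the (truncated) initial distribution. A small numpy sketch of that convergence (the starting state is an arbitrary assumption):

import numpy as np

# Same transition matrix, states '1'..'6': 1/11 on the diagonal, 2/11 elsewhere.
P = np.full((6, 6), 2 / 11.0) - np.diag([1 / 11.0] * 6)
dist = np.array([1.0, 0, 0, 0, 0, 0])  # assume we start in state '1'
for _ in range(20):
    dist = dist @ P
print(dist)  # every entry is ≈ 1/6 after only a few steps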
Code Example #6
File: TennisMCSampleV.py Project: KottonP/Projects
    '0-30': {'15-30': p, '0-40': 1 - p},

    '40-0': {'GameA': p, '40-15': 1 - p}, '30-15': {'40-15': p, 'Deuce': 1 - p},  # 4th row
    '15-30': {'Deuce': p, '15-40': 1 - p}, '0-40': {'15-40': p, 'GameB': 1 - p},

    '40-15': {'GameA': p, 'AdvA': 1 - p}, 'Deuce': {'AdvA': p, 'AdvB': 1 - p}, '15-40': {'AdvB': p, 'GameB': 1 - p},
    # 5th row

    'GameA': {'GameA': 1.}, 'AdvA': {'GameA': p, 'Deuce': 1 - p},  # 6th row
    'AdvB': {'Deuce': p, 'GameB': 1 - p}, 'GameB': {'GameB': 1.}
}

initial_dist = {'0-0': 1.}  # Every game starts from 0-0

# Markov Chain construction
mc = markov_chain(markov_table, initial_dist)

# Experiment parameters
N: int = 1000   # number of runs per estimate
M: int = 500    # number of phat estimates to collect
p_list = []     # list of the phat estimates


# Simulation
for j in range(M):
    counter = 0
    for i in range(N):
        mc.start()  # new experiment
        while True:
            mc.move()
            if mc.running_state == 'GameA':
                counter += 1
                break
            elif mc.running_state == 'GameB':
                break
    p_list.append(counter / N)  # record one phat estimate per pass
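
p_list then holds M independent estimates of the win probability, so the experiment can examine the sampling distribution of phat; by the CLT its standard deviation should be close to sqrt(phat * (1 - phat) / N). A short way to summarize that spread:

import statistics

print(statistics.mean(p_list), statistics.stdev(p_list))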
Code Example #7
def TennisWinProb(p: float, n: int) -> float:
    # Compute the probability of PlayerA winning a game, given the
    # probability p of winning a point and the number of samples n.
    markov_table = {
        '0-0': {
            '15-0': p,
            '0-15': 1 - p
        },  # 1st row of the pic
        '15-0': {
            '30-0': p,
            '15-15': 1 - p
        },
        '0-15': {
            '15-15': p,
            '0-30': 1 - p
        },  # 2nd row
        '30-0': {
            '40-0': p,
            '30-15': 1 - p
        },
        '15-15': {
            '30-15': p,
            '15-30': 1 - p
        },  # 3rd row
        '0-30': {
            '15-30': p,
            '0-40': 1 - p
        },
        '40-0': {
            'GameA': p,
            '40-15': 1 - p
        },
        '30-15': {
            '40-15': p,
            'Deuce': 1 - p
        },  # 4th row
        '15-30': {
            'Deuce': p,
            '15-40': 1 - p
        },
        '0-40': {
            '15-40': p,
            'GameB': 1 - p
        },
        '40-15': {
            'GameA': p,
            'AdvA': 1 - p
        },
        'Deuce': {
            'AdvA': p,
            'AdvB': 1 - p
        },
        '15-40': {
            'AdvB': p,
            'GameB': 1 - p
        },
        # 5th row
        'GameA': {
            'GameA': 1.
        },
        'AdvA': {
            'GameA': p,
            'Deuce': 1 - p
        },  # 6th row
        'AdvB': {
            'Deuce': p,
            'GameB': 1 - p
        },
        'GameB': {
            'GameB': 1.
        }
    }

    initial_dist = {'0-0': 1.}  # Every game starts from 0-0

    # Markov Chain construction
    mc = markov_chain(markov_table, initial_dist)
    counter = 0

    # Simulation
    for i in range(n):
        mc.start()  # new experiment
        while True:
            mc.move()
            if mc.running_state == 'GameA':
                counter += 1
                break
            elif mc.running_state == 'GameB':
                break

    phat: float = counter / n

    return phat
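
Because the chain is the standard tennis-game graph, the simulated phat can be compared against the well-known closed form: PlayerA wins to 0, 15 or 30 outright, or reaches deuce at three points apiece and wins from there. A sketch of that check (the helper name is ours, not from the project):

def tennis_win_exact(p: float) -> float:
    q = 1 - p
    deuce = p**2 / (p**2 + q**2)  # probability of winning from Deuce
    # win 4-0, 4-1, 4-2, or reach 3-3 (20 orderings) and then win from deuce
    return p**4 * (1 + 4*q + 10*q**2) + 20 * p**3 * q**3 * deuce

# e.g. TennisWinProb(0.6, 100000) should land near tennis_win_exact(0.6) ≈ 0.7357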