Code example #1
def q8(variable, conditions, value):
    # Query the chatbot Bayes net and report the result as a readable sentence.
    # `chatbot`, `psize_dict`, and `clength_dict` are assumed to be defined elsewhere in this file.

    # Do exact inference by enumeration
    probdist = enumeration_ask(variable, conditions, chatbot)

    # Look up the probability of the requested value (before any renaming below)
    prob = round(probdist.prob[value], 4)

    # Map the evidence values back to their human-readable names for the output string
    for var in conditions:
        if var == 'ProblemSize':
            conditions[var] = psize_dict[conditions[var]]
        if var == 'ConversationLength':
            conditions[var] = clength_dict[conditions[var]]

    # type() rather than isinstance() so that booleans are not treated as ints
    if type(value) == int:
        value = clength_dict[value]

    return 'Probability of ' + variable + ' being ' + str(value) + ' given ' + \
           str(conditions) + ': ' + str(prob)
Code example #2
print(spam_filter.filter(["i", "am", "spam", "spam", "i", "am"])) # spam list
print(spam_filter.filter(["do", "i", "like", "green", "eggs", "and", "ham"])) # ham list
print(spam_filter.filter(["i", "do", "not", "like", "that", "spamiam"]))


# Problem 2a
# TODO: copy into Jupyter notebook

from probability import BayesNet, enumeration_ask, elimination_ask, gibbs_ask

# Utility variables
T, F = True, False

grass = BayesNet([
    ('Cloudy', '', 0.5),
    ('Sprinkler', 'Cloudy', {T:0.1, F:0.5}),
    ('Rain', 'Cloudy', {T:0.8, F:0.2}),
    ('WetGrass', 'Sprinkler Rain', {(T, T):0.99, (T,F):0.9, (F,T):0.9, (F,F):0.0}),
    ])

print("\n\nProblem 2")
print("i. " + enumeration_ask('Cloudy', dict(), grass).show_approx())

print("ii. " + enumeration_ask('Sprinkler', dict(Cloudy=T), grass).show_approx())

print("iii. " + enumeration_ask('Cloudy', dict(Sprinkler=T, Rain=F), grass).show_approx())

print("iv. " + enumeration_ask('WetGrass', dict(Cloudy=T, Sprinkler=T, Rain=T), grass).show_approx())

print("v. " + enumeration_ask('Cloudy', dict(WetGrass=F), grass).show_approx())
Code example #3
# Sunny/Raise/Happy network from Exercise 5.3, built with AIMA's BayesNet (probability.py)
sunny = BayesNet([
    ('Sunny', '', 0.7),
    ('Raise', '', 0.01),
    ('Happy', 'Sunny Raise', {
        (T, T): 1.0,
        (T, F): 0.7,
        (F, T): 0.9,
        (F, F): 0.1
    }),
])

# P(Raise | sunny)
#   These are independent, so it's just P(Raise)
#   = <0.01, 0.99>
print(enumeration_ask('Raise', dict(Sunny=T), sunny).show_approx())

# P(Raise | happy ∧ sunny)
#   = a * <P(h | s, R) * P(R), P(h | s, ¬R) * P(¬R)>
#   = a * <1.0 * 0.01, 0.7 * 0.99>
#   = <0.0142, 0.986>
print(enumeration_ask('Raise', dict(Happy=T, Sunny=T), sunny).show_approx())

# P(Raise | happy)
# <0.0185, 0.982>
print(enumeration_ask('Raise', dict(Happy=T), sunny).show_approx())

# P(Raise | happy ∧ ¬sunny)
# <0.0833, 0.917>
print(enumeration_ask('Raise', dict(Happy=T, Sunny=F), sunny).show_approx())
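
# The hand-computed answers in the comments above can be checked without the library by
# normalizing the unnormalized pairs directly; a minimal sketch in plain Python:
def normalize(p_true, p_false):
    """Return (P(True), P(False)) from an unnormalized <True, False> pair."""
    total = p_true + p_false
    return round(p_true / total, 4), round(p_false / total, 4)

# P(Raise | happy, sunny)  ∝ <P(h|s,R)*P(R), P(h|s,¬R)*P(¬R)>
print(normalize(1.0 * 0.01, 0.7 * 0.99))                        # (0.0142, 0.9858)
# P(Raise | happy)         ∝ <P(R)*Σs P(s)*P(h|s,R), P(¬R)*Σs P(s)*P(h|s,¬R)>
print(normalize(0.01 * (0.7 * 1.0 + 0.3 * 0.9),
                0.99 * (0.7 * 0.7 + 0.3 * 0.1)))                # (0.0185, 0.9815)
# P(Raise | happy, ¬sunny) ∝ <P(h|¬s,R)*P(R), P(h|¬s,¬R)*P(¬R)>
print(normalize(0.9 * 0.01, 0.1 * 0.99))                        # (0.0833, 0.9167)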
Code example #4
                          T: 0.1,
                          F: 0.5
                      }), ('Rain', 'Cloudy', {
                          T: 0.8,
                          F: 0.2
                      }),
                      ('WetGrass', 'Sprinkler Rain', {
                          (T, T): 0.99,
                          (T, F): 0.90,
                          (F, T): 0.9,
                          (F, F): 0.0
                      })])

# d.
print('P(Cloudy)')
print(enumeration_ask('Cloudy', dict(), wet_lawns).show_approx())

print('P(Sprinkler | cloudy)')
print(enumeration_ask('Sprinkler', dict(Cloudy=T), wet_lawns).show_approx())

print('P(Cloudy | the sprinkler is running and it’s not raining)')
print(
    enumeration_ask('Cloudy', dict(Sprinkler=T, Rain=F),
                    wet_lawns).show_approx())

print('P(WetGrass | it’s cloudy, the sprinkler is running and it’s raining)')
print(
    enumeration_ask('WetGrass', dict(Cloudy=T, Sprinkler=T, Rain=T),
                    wet_lawns).show_approx())

print('P(Cloudy | the grass is not wet)')
Code example #5
'''

from probability import BayesNet, enumeration_ask, elimination_ask

# Utility variables
T, F = True, False

cancer = BayesNet([
    ('Cancer', '', 0.01),
    ('Test1', 'Cancer', {T: 0.9, F: 0.2}),
    ('Test2', 'Cancer', {T: 0.9, F: 0.2}),
])

# a. P(Cancer | Test1 ^ Test2)
# This calculates the probability of cancer given that test1 and test2 were positive
print(enumeration_ask('Cancer', dict(Test1=T, Test2=T), cancer).show_approx())
print(elimination_ask('Cancer', dict(Test1=T, Test2=T), cancer).show_approx())

# b. P(Cancer | Test1 ^ -Test2)
# This calculates the probability of cancer given that test1 was positive and test2 was negative
print(enumeration_ask('Cancer', dict(Test1=T, Test2=F), cancer).show_approx())
print(elimination_ask('Cancer', dict(Test1=T, Test2=F), cancer).show_approx())

# These results make sense. One failed test reduces the probability of having cancer by roughly a factor of 30 (from about 0.17 down to about 0.006)

# Here is P(Cancer | Test1 ^ Test2) worked out by hand
# = alpha * <P(cancer) * P(test1 | cancer) * P(test2 | cancer),
#            P(-cancer) * P(test1 | -cancer) * P(test2 | -cancer)>
# = alpha * <0.01 * 0.9 * 0.9, 0.99 * 0.2 * 0.2>
# = alpha * <0.0081, 0.0396>
# = <0.17, 0.83>
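
# A quick numeric check of the claim above: normalize both posteriors by hand (values
# taken from the cancer network defined above) and compare them.
t1_t2   = (0.01 * 0.9 * 0.9, 0.99 * 0.2 * 0.2)   # unnormalized <True, False>, both tests positive
t1_not2 = (0.01 * 0.9 * 0.1, 0.99 * 0.2 * 0.8)   # unnormalized <True, False>, test 2 negative
p_both = t1_t2[0] / sum(t1_t2)                   # ≈ 0.1698
p_one = t1_not2[0] / sum(t1_not2)                # ≈ 0.0056
print(round(p_both, 4), round(p_one, 4), round(p_both / p_one, 1))   # ratio ≈ 30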
Code example #6
File: lab_1.py  Project: ekm6/cs344
# Utility variables
T, F = True, False

# From AIMA code (probability.py) - Fig. 14.2 - burglary example
burglary = BayesNet([
    ('Burglary', '', 0.001),
    ('Earthquake', '', 0.002),
    ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
    ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
    ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
    ])



#1) Compute P(Alarm | burglary ∧ ¬earthquake)
print(enumeration_ask('Alarm', dict(Burglary=T, Earthquake=F), burglary).show_approx())

#2) Compute P(John | burglary ∧ ¬earthquake).
print(enumeration_ask('JohnCalls', dict(Burglary=T, Earthquake=F), burglary).show_approx())

#3) Compute P(Burglary | alarm)
print(enumeration_ask('Burglary', dict(Alarm=T), burglary).show_approx())

#4) P(Burglary | john ∧ mary)

print("Enumeration ask Method:")
print(enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx(), '\n\n')

print("Rejection Sampling:")
print(elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx(), '\n\n')
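
# If rejection sampling is actually wanted here for comparison, AIMA's probability.py also
# provides rejection_sampling; a minimal sketch (it is sampling-based, so the numbers
# vary from run to run):
from probability import rejection_sampling

print("Rejection Sampling:")
print(rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T),
                         burglary, N=10000).show_approx(), '\n\n')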
Code example #7
T, F = True, False

weather = BayesNet([
    ('Sunny', '', 0.7),
    ('Raise', '', 0.01),
    ('Happy', 'Sunny Raise', {
        (T, T): 1.0,
        (T, F): 0.7,
        (F, T): 0.9,
        (F, F): 0.1
    }),
])

# a. i. P(Raise | sunny)  ii. P(Raise | happy ^ sunny)
print("a.")
print("i. " + enumeration_ask('Raise', dict(Sunny=T), weather).show_approx())
print("ii. " +
      enumeration_ask('Raise', dict(Happy=T, Sunny=T), weather).show_approx())
"""
By hand:
i. P(Raise | sunny) = <0.01, 0.99>
Which is taken directly from the table because raise does not depend on sunny and we have the probability of a raise.

ii. Using a diagnostic computation
P(Raise | happy ^ sunny) = alpha * <P(raise)*P(sunny)*P(happy | raise ^ sunny), P(-raise)*P(sunny)*P(happy | -raise ^ sunny)>
                         = alpha * <0.01*0.7*1.0, 0.99*0.7*0.7> = alpha * <0.007, 0.4851> = <0.007, 0.4851> / 0.4921 = <0.0142, 0.986>
This matches the program output; the earlier "slightly off" result came from using P(happy | raise ^ -sunny) = 0.9 instead of P(happy | -raise ^ sunny) = 0.7 in the second component.
"""

# b. i. P(Raise | happy)
print("b.")
print("i. " + enumeration_ask('Raise', dict(Happy=T), weather).show_approx())
Code example #8
File: lab_3.py  Project: SaHendriksen/cs344
# Sunny/Raise/Happy network from Exercise 5.3, built with AIMA's BayesNet (probability.py)
happiness = BayesNet([
    ('Sunny', '', 0.70),
    ('Raise', '', 0.01),
    ('Happy', 'Sunny Raise', {
        (T, T): 1.0,
        (T, F): 0.70,
        (F, T): 0.90,
        (F, F): 0.10
    }),
])

# Exercise 5.3
# a)
# i)
print(enumeration_ask('Raise', dict(Sunny=T), happiness).show_approx())
# Output - False: .99, True: .01
# Hand-computed -
#   P(R | s) = < .01, .99 > --> they are independent events so it is just P(R)

# ii)
print(
    enumeration_ask('Raise', dict(Sunny=T, Happy=T), happiness).show_approx())
# Output - False: .986, True: .0142
# Hand-computed -
#   P(R | h and s) = alpha P(R, h, s)
#                  = alpha P(h | R and s) * P(s) * p(R)
#                  = alpha < P(h | R and s) * P(s) * p(R), P(h | not R and s) * P(s) * p(not R) >
#                  = alpha < 1.0 * .7 * .01, .7 * .7 * .99 >
#                  = alpha < .007, .4851 >
#                  = < .0142, .986 >
Code example #9
File: lab_1.py  Project: mjm87/cs344
                         (T, T): 0.95,
                         (T, F): 0.94,
                         (F, T): 0.29,
                         (F, F): 0.001
                     }), ('JohnCalls', 'Alarm', {
                         T: 0.90,
                         F: 0.05
                     }), ('MaryCalls', 'Alarm', {
                         T: 0.70,
                         F: 0.01
                     })])

# Exercise 5.1

# a. The answer makes sense given that it can be taken straight off the chart
print("P(Alarm | burglary ^ !earthquake) => " + enumeration_ask(
    'Alarm', dict(Burglary=T, Earthquake=F), burglary).show_approx())

# b. This also matches the intuition that P(J|B^!e) = P(J|A) * P(A|B^!e) + P(J|!A) * P(!A|B^!e)
print("P(John | burglary ^ !earthquake) => " + enumeration_ask(
    'JohnCalls', dict(Burglary=T, Earthquake=F), burglary).show_approx())

# c. This makes sense given that this is the answer we got in class.
#    It also makes sense that it's fairly low, since an earthquake, which can also trigger the alarm,
#    is about twice as likely as a burglary.
print("P(Burglary | alarm) => " +
      enumeration_ask('Burglary', dict(Alarm=T), burglary).show_approx())

# d. This matches the answer worked out by hand above. It also makes sense that it is lower than P(B | a),
#    because neither John nor Mary reports the alarm with full reliability (0.90 and 0.70, respectively).
print("P(Burglary | john ^ mary) => " + enumeration_ask(
    'Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
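
# A quick numeric check of the relationship in comment b, using the CPT values above:
p_alarm = 0.94                                    # P(a | b, !e) from the Alarm CPT
p_john = 0.90 * p_alarm + 0.05 * (1 - p_alarm)    # condition on the alarm's two states
print(round(p_john, 3))                           # -> 0.849, matching enumeration_ask above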
Code example #10
import sys

sys.path.insert(0, '../tools/aima')
from probability import BayesNet, enumeration_ask, elimination_ask, gibbs_ask

# Utility variables
T, F = True, False

happyNetwork = BayesNet([('Sunny', '', 0.7), ('Raise', '', 0.01),
                         ('Happy', 'Sunny Raise', {
                             (T, T): 1.0,
                             (T, F): 0.7,
                             (F, T): 0.9,
                             (F, F): 0.1
                         })])

print("BP ( Raise | Sunny )")
print(enumeration_ask('Raise', dict(Sunny=T), happyNetwork).show_approx())
print("BP ( Raise | Happy & Sunny )")
print(
    enumeration_ask('Raise', dict(Happy=T, Sunny=T),
                    happyNetwork).show_approx())
'''

BP ( Raise | Sunny )
False: 0.99, True: 0.01
BP ( Raise | Happy & Sunny )
False: 0.986, True: 0.0142

The first probability distribution is expected because, in the Bayesian network, Sunny and Raise
are independent (there is no edge between them), so conditioning on Sunny leaves P(Raise) unchanged.

The second probability distribution makes sense because the probability that you have a raise is increased because there is a
Code example #11
File: exercise2.py  Project: ChanKim04/cs344
                      ('Sprinkler', 'Cloudy', {
                          T: 0.10,
                          F: 0.50
                      }), ('Rain', 'Cloudy', {
                          T: 0.80,
                          F: 0.20
                      }),
                      ('WetGrass', 'Sprinkler Rain', {
                          (T, T): 0.99,
                          (T, F): 0.90,
                          (F, T): 0.90,
                          (F, F): 0.00
                      })])

print("P(Cloudy)")
print(enumeration_ask('Cloudy', dict(), exercises).show_approx())
print(elimination_ask('Cloudy', dict(), exercises).show_approx())
print("P(Sprinker | cloudy)")
print(enumeration_ask('Sprinkler', dict(Cloudy=T), exercises).show_approx())
print(elimination_ask('Sprinkler', dict(Cloudy=T), exercises).show_approx())
print("P(Cloudy| the sprinkler is running and it’s not raining)")
print(
    enumeration_ask('Cloudy', dict(Sprinkler=T, Rain=F),
                    exercises).show_approx())
print(
    elimination_ask('Cloudy', dict(Sprinkler=T, Rain=F),
                    exercises).show_approx())
print("P(WetGrass | it’s cloudy, the sprinkler is running and it’s raining)")
print(
    enumeration_ask('WetGrass', dict(Cloudy=T, Sprinkler=T, Rain=T),
                    exercises).show_approx())
def Q1_1_2():
    print("=" * 80)
    print("ANSWER 1.1.2")
    T, F = True, False
    bayes_net = BayesNet([('AI', '', 0.8), ('FossilFuel', '', 0.4),
                          ('RenewableEnergy', 'AI FossilFuel', {
                              (T, T): 0.2,
                              (T, F): 0.7,
                              (F, T): 0.02,
                              (F, F): 0.5
                          }), ('Traffic', 'FossilFuel', {
                              T: 0.95,
                              F: 0.1
                          }),
                          ('GlobalWarming', 'RenewableEnergy Traffic', {
                              (T, T): 0.6,
                              (T, F): 0.4,
                              (F, T): 0.95,
                              (F, F): 0.55
                          }),
                          ('Employed', 'AI GlobalWarming', {
                              (T, T): 0.01,
                              (T, F): 0.03,
                              (F, T): 0.03,
                              (F, F): 0.95
                          })])
    #print(bayes_net.variable_node('GlobalWarming').cpt)
    p_employed = enumeration_ask(X='Employed',
                                 e={
                                     'AI': True,
                                     'FossilFuel': True
                                 },
                                 bn=bayes_net)
    print('-' * 80)
    print(f"given AI=true and FossilFuel=True:",
          f"\n\t\tP(Employed)\t\t=\t\t{p_employed.show_approx()}")
    print('-' * 80)
    p_global_warming = elimination_ask(X='GlobalWarming',
                                       e={
                                           'Employed': False,
                                           'Traffic': False
                                       },
                                       bn=bayes_net)
    print('-' * 80)
    print(f"Given Employed=False and Traffic=False, \n\t\tP(GlobalWarming)\t=",
          f"\t\t{p_global_warming.show_approx()}")
    print('-' * 80)
    p_ai = elimination_ask(X='AI',
                           e={
                               'RenewableEnergy': True,
                               'GlobalWarming': True,
                               'Employed': True,
                               'Traffic': True,
                               'FossilFuel': True
                           },
                           bn=bayes_net)
    print('-' * 80)
    print(
        f"Given RenewableEnergy=T, GlobalWarming=T",
        f"Employed=T, Traffic=T, FossilFuel=T, \n\t\tP(AI)\t\t\t=\t\t{p_ai.show_approx()}"
    )
    print('-' * 80)
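
# The first query above can also be enumerated by hand as a sanity check on the CPTs; a
# minimal sketch (P(AI) and P(FossilFuel) cancel out because both are fixed as evidence):
from itertools import product

def employed_given_ai_and_fossil_fuel():
    """Hand enumeration of P(Employed | AI=True, FossilFuel=True) from the CPTs above."""
    p_re = {True: 0.2, False: 0.8}          # P(RenewableEnergy=v | AI=T, FossilFuel=T)
    p_tr = {True: 0.95, False: 0.05}        # P(Traffic=v | FossilFuel=T)
    p_gw_true = {(True, True): 0.6, (True, False): 0.4,
                 (False, True): 0.95, (False, False): 0.55}   # P(GlobalWarming=T | RE, Traffic)
    p_emp_true = {True: 0.01, False: 0.03}                    # P(Employed=T | AI=T, GlobalWarming)

    unnorm = {True: 0.0, False: 0.0}
    for re, tr, gw in product([True, False], repeat=3):
        p_gw = p_gw_true[(re, tr)] if gw else 1 - p_gw_true[(re, tr)]
        joint = p_re[re] * p_tr[tr] * p_gw
        for emp in (True, False):
            p_emp = p_emp_true[gw] if emp else 1 - p_emp_true[gw]
            unnorm[emp] += joint * p_emp
    total = sum(unnorm.values())
    return {emp: round(p / total, 4) for emp, p in unnorm.items()}

print(employed_given_ai_and_fossil_fuel())   # Employed=True should come out around 0.013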
Code example #13
T, F = True, False

# from figure in Exercise 5.3
happy = BayesNet([('Sunny', '', 0.7), ('Raise', '', 0.01),
                  ('Happy', 'Sunny Raise', {
                      (T, T): 1.0,
                      (T, F): 0.7,
                      (F, T): 0.9,
                      (F, F): 0.1
                  })])

print()

# Exercise 5.3 PART A
# i. P(Raise | Sunny)
print("a. i.  ", enumeration_ask('Raise', dict(Sunny=T), happy).show_approx())
# ii. P(Raise | Happy ^ Sunny)
print("   ii. ",
      enumeration_ask('Raise', dict(Happy=T, Sunny=T), happy).show_approx())

# Exercise 5.3 PART B
# i. P(Raise | Happy)
print("b. i.  ", enumeration_ask('Raise', dict(Happy=T), happy).show_approx())
# ii. P(Raise | Happy ^ ¬Sunny)
print('   ii. ',
      enumeration_ask('Raise', dict(Happy=T, Sunny=F), happy).show_approx())
'''
Work by hand:
a) i.  P(Raise | Sunny) = P(Raise)
                        = < P(Raise), P(¬Raise) >
                        = < 0.01, 0.99 >
Code example #14
# Utility variables
T, F = True, False

# From AIMA code (probability.py) - Fig. 14.2 - burglary example
burglary = BayesNet([
    ('Burglary', '', 0.001),
    ('Earthquake', '', 0.002),
    ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
    ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
    ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
    ])

# P(Alarm | burglary ∧ ¬earthquake)
# Straight from the CPT: P(a | b, ¬e) = 0.94, so <False: 0.06, True: 0.94>
print(enumeration_ask('Alarm', dict(Burglary=T, Earthquake=F), burglary).show_approx())

# P(John | burglary ∧ ¬earthquake)
#   a * [ P( J | A ) * P( A | B, -E ) + P( J | -A ) * P( -A | B, -E) ]
#   = a * (0.9 * 0.94 + 0.05 * 0.06)
#   = < .151, .849 >
print(elimination_ask('JohnCalls', dict(Burglary=T, Earthquake=F), burglary).show_approx())

# P(Burglary | alarm) =
#   a * [ P(B) * P(E) * P( A | B, E ) + P(B) * P(-E) * P( A | B, -E) ]
#   = a * (0.001 * 0.002 * 0.95 + 0.001 * 0.998 * 0.94)
#   = <0.374, 0.626>
print(elimination_ask('Burglary', dict(Alarm=T), burglary).show_approx())

# P(Burglary | john ∧ mary)
#   = <.284, .716>
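
# The <.284, .716> figure can be reproduced without the library by summing out the hidden
# variables Earthquake and Alarm; a minimal sketch using the CPT values defined above:
p_e = {True: 0.002, False: 0.998}                        # P(Earthquake)
p_a = {(True, True): 0.95, (True, False): 0.94,
       (False, True): 0.29, (False, False): 0.001}       # P(Alarm=T | Burglary, Earthquake)
p_j = {True: 0.90, False: 0.05}                          # P(JohnCalls=T | Alarm)
p_m = {True: 0.70, False: 0.01}                          # P(MaryCalls=T | Alarm)

unnorm = {}
for b, p_b in ((True, 0.001), (False, 0.999)):
    total = 0.0
    for e in (True, False):
        for a in (True, False):
            p_alarm = p_a[(b, e)] if a else 1 - p_a[(b, e)]
            total += p_e[e] * p_alarm * p_j[a] * p_m[a]
    unnorm[b] = p_b * total

z = sum(unnorm.values())
print({b: round(p / z, 3) for b, p in unnorm.items()})   # {True: 0.284, False: 0.716}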
Code example #15
cancerTest = BayesNet([('Cancer', '', 0.01),
                       ('Test1', 'Cancer', {
                           T: 0.9,
                           F: 0.2
                       }), ('Test2', 'Cancer', {
                           T: 0.9,
                           F: 0.2
                       })])

# Exercise 5.2
# Pull network.py and add enumeration_ask computations for the following examples:
#
# a. P(Cancer | positive results on both tests)
print(
    "\n P(Cancer | T1 ∧ T2): ",
    enumeration_ask('Cancer', dict(Test1=T, Test2=T),
                    cancerTest).show_approx())

# b. P(Cancer | a positive result on test 1, but a negative result on test 2)
print(
    "\n P(Cancer | T1 ∧ ¬T2): ",
    enumeration_ask('Cancer', dict(Test1=T, Test2=F),
                    cancerTest).show_approx())
'''
Do the results make sense? How much effect does one failed test have on the probability of having cancer?

ANSWER: A negative result on either test dramatically decreases the probability of having cancer. This is
        because the probability of a false negative, P(¬test | cancer), is low (0.1).

Explain your answers and work them out by hand.

P(Cancer | T1 ∧ T2) = alpha*<P(Cancer, T1, T2), P(¬Cancer, T1, T2)>
Code example #16
                     ('Alarm', 'Burglary Earthquake', {
                         (T, T): 0.95,
                         (T, F): 0.94,
                         (F, T): 0.29,
                         (F, F): 0.001
                     }), ('JohnCalls', 'Alarm', {
                         T: 0.90,
                         F: 0.05
                     }), ('MaryCalls', 'Alarm', {
                         T: 0.70,
                         F: 0.01
                     })])

# Compute P(Burglary | John and Mary both call).
print(
    enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T),
                    burglary).show_approx())
# elimination_ask() is a dynamic programming version of enumeration_ask().
print(
    elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T),
                    burglary).show_approx())
# gibbs_ask() is an approximation algorithm that helps Bayesian networks scale up.
print(
    gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T),
              burglary).show_approx())
# See the explanation of the algorithms in AIMA Section 14.4.

# Exercise 5.1
# Pull network.py and add enumeration_ask computations for the following examples:
#
# a. P(Alarm | burglary ∧ ¬earthquake)
print(
Code example #17
File: cloudy.py  Project: Nicaderp/cs344
        F: .5
    }),
    ('Rain', 'Cloudy', {
        T: .8,
        F: .2
    }),
    ('WetGrass', 'Sprinkler Rain', {
        (T, T): 0.99,
        (T, F): 0.9,
        (F, T): 0.9,
        (F, F): 0.0
    }),
])

print("\nP(Cloudy)")
print(enumeration_ask('Cloudy', dict(), cloudy).show_approx())

print("\nP(Sprinkler | Cloudy)")
print(enumeration_ask('Sprinkler', dict(Cloudy=True), cloudy).show_approx())

print("\nP(Cloudy | Sprinkler and no Rain)")
print(
    enumeration_ask('Cloudy', dict(Sprinkler=True, Rain=False),
                    cloudy).show_approx())

print("\nP(Wet Grass | sprinkler and cloudy and raining)")
print(
    enumeration_ask("WetGrass", dict(Sprinkler=True, Rain=True),
                    cloudy).show_approx())

print("\nP(Cloudy | no wet grass)")
Code example #18
from probability import BayesNet, enumeration_ask, elimination_ask, gibbs_ask, likelihood_weighting, rejection_sampling

# Utility variables
T, F = True, False

# From AIMA code (probability.py) - Fig. 14.2 - burglary example
burglary = BayesNet([
    ('Burglary', '', 0.001),
    ('Earthquake', '', 0.002),
    ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
    ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
    ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
    ])

# Compute P(Burglary | John and Mary both call).
print(enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
# elimination_ask() is a dynamic programming version of enumeration_ask().
print(elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
# gibbs_ask() is an approximation algorithm that helps Bayesian networks scale up.
print(gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
# See the explanation of the algorithms in AIMA Section 14.4.

# rejection_sampling() and likelihood_weighting() are also approximation algorithms.
print(rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
print(likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())

# P(Alarm | burglary ∧ ¬earthquake)
print('5.1 i:  \t' + enumeration_ask('Alarm', dict(Burglary=T, Earthquake=F), burglary).show_approx())
# P(John | burglary ∧ ¬earthquake)
print('5.1 ii: \t' + enumeration_ask('JohnCalls', dict(Burglary=T, Earthquake=F), burglary).show_approx())
# P(Burglary | alarm)
Code example #19
T, F = True, False

# From AIMA code (probability.py) - Fig. 14.2 - burglary example
burglary = BayesNet([('Burglary', '', 0.001), ('Earthquake', '', 0.002),
                     ('Alarm', 'Burglary Earthquake', {
                         (T, T): 0.95,
                         (T, F): 0.94,
                         (F, T): 0.29,
                         (F, F): 0.001
                     }), ('JohnCalls', 'Alarm', {
                         T: 0.90,
                         F: 0.05
                     }), ('MaryCalls', 'Alarm', {
                         T: 0.70,
                         F: 0.01
                     })])

# Compute P(Burglary | John and Mary both call).
print(
    enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T),
                    burglary).show_approx())
# elimination_ask() is a dynamic programming version of enumeration_ask().
print(
    elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T),
                    burglary).show_approx())
# gibbs_ask() is an approximation algorithm that helps Bayesian networks scale up.
print(
    gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T),
              burglary).show_approx())
# See the explanation of the algorithms in AIMA Section 14.4.