# Example 1
# Burglary network from AIMA code (probability.py), Fig. 14.2:
# Alarm is triggered by Burglary and/or Earthquake; JohnCalls and
# MaryCalls each depend only on Alarm.
burglary = BayesNet([
    ('Burglary', '', 0.001),
    ('Earthquake', '', 0.002),
    ('Alarm', 'Burglary Earthquake', {
        (T, T): 0.95,
        (T, F): 0.94,
        (F, T): 0.29,
        (F, F): 0.001,
    }),
    ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
    ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01}),
])

# P(Burglary | John and Mary both call), via the algorithms of AIMA
# Section 14.4: exact enumeration, variable elimination (a dynamic
# programming version of enumeration), and three sampling-based
# approximations that let Bayesian networks scale up.
both_call = dict(JohnCalls=T, MaryCalls=T)
print(enumeration_ask('Burglary', both_call, burglary).show_approx())
print(elimination_ask('Burglary', both_call, burglary).show_approx())
print(gibbs_ask('Burglary', both_call, burglary).show_approx())

# rejection_sampling() and likelihood_weighting() are also approximation algorithms.
print(rejection_sampling('Burglary', both_call, burglary).show_approx())
print(likelihood_weighting('Burglary', both_call, burglary).show_approx())

# Lab exercise 5.1 queries against the burglary network.
# i:   causal inference P(Alarm | burglary ∧ ¬earthquake)
print('5.1 i:  \t' + enumeration_ask('Alarm', dict(Burglary=T, Earthquake=F), burglary).show_approx())
# ii:  P(JohnCalls | burglary ∧ ¬earthquake)
# BUG FIX: the network's node is named 'JohnCalls', not 'John'; querying
# 'John' fails because no such variable exists in the burglary BayesNet.
print('5.1 ii: \t' + enumeration_ask('JohnCalls', dict(Burglary=T, Earthquake=F), burglary).show_approx())
# iii: diagnostic inference P(Burglary | alarm)
print('5.1 iii:\t' + enumeration_ask('Burglary', dict(Alarm=T), burglary).show_approx())

'''
Answers to section 5.4 of lab05, for CS 344:

Diagnostic inference.  Refer to screen capture and/or turned in paper copy
for mathematical explanation.

The probability that you have cancer given that you obtained a positive
result on both tests.
'''

# Diagnostic query: P(Cancer | positive results on both tests), computed
# with each inference algorithm from AIMA Section 14.4 — exact enumeration,
# variable elimination (a dynamic-programming version of enumeration), and
# the sampling approximations that help Bayesian networks scale up.
print("\nP(Cancer | positive results on both tests)")
both_positive = dict(Test1=T, Test2=T)
print(enumeration_ask('Cancer', both_positive, cancer).show_approx())
print(elimination_ask('Cancer', both_positive, cancer).show_approx())
print(gibbs_ask('Cancer', both_positive, cancer).show_approx())
print(rejection_sampling('Cancer', both_positive, cancer).show_approx())
print(likelihood_weighting('Cancer', both_positive, cancer).show_approx())
'''
Diagnostic inference.  Refer to screen capture and/or turned in paper copy for mathematical explanation.

The probability that you have cancer given that you obtained a positive result on one test and a negative result
on the other test.
'''

# Compute P(Cancer | a positive result on test 1, but a negative result on test 2)
print(
# Example 3
    F: 0.50
}),
                   ('WetGrass', 'Sprinkler Rain', {
                       (T, T): 0.99,
                       (T, F): 0.90,
                       (F, T): 0.90,
                       (F, F): 0.00
                   })])

# Queries against the sprinkler/rain ('cloudy') network, run through each
# inference algorithm of AIMA Section 14.4: exact enumeration, variable
# elimination (a dynamic-programming version of enumeration), and the
# sampling-based approximations.

# Prior over Cloudy — no evidence observed.
print("\nP(Cloudy)")
for ask in (enumeration_ask, elimination_ask, gibbs_ask,
            rejection_sampling, likelihood_weighting):
    print(ask('Cloudy', dict(), cloudy).show_approx())

###################################################################################################

# P(Sprinkler | cloudy).  Note likelihood_weighting is not run for this
# query, matching the original script.
print("\nP(Sprinkler | cloudy)")
for ask in (enumeration_ask, elimination_ask, gibbs_ask,
            rejection_sampling):
    print(ask('Sprinkler', dict(Cloudy=T), cloudy).show_approx())
# Example 4
    ('Happy', 'Sunny Raise', {(T, T): 1.0, (T, F): 0.7, (F, T): 0.9, (F, F): 0.1}),
    ])

'''
Inference.  Refer to screen capture and/or turned in paper copy for
mathematical explanation.

The probability that you obtain a raise given that it is sunny.
'''

# P(Raise | sunny), via each algorithm from AIMA Section 14.4: exact
# enumeration, variable elimination (a dynamic-programming version of
# enumeration), and the sampling approximations.
print("\nP(Raise | sunny)")
sunny_only = dict(Sunny=T)
print(enumeration_ask('Raise', sunny_only, happiness).show_approx())
print(elimination_ask('Raise', sunny_only, happiness).show_approx())
print(gibbs_ask('Raise', sunny_only, happiness).show_approx())
print(rejection_sampling('Raise', sunny_only, happiness).show_approx())
print(likelihood_weighting('Raise', sunny_only, happiness).show_approx())

'''
Diagnostic inference.  Refer to screen capture and/or turned in paper copy
for mathematical explanation.

The probability that you obtain a raise given that you are happy and it is
sunny.
'''

# P(Raise | happy ∧ sunny) — only the two exact algorithms here, matching
# the original script.
print("\nP(Raise | happy ∧ sunny)")
happy_and_sunny = dict(Happy=T, Sunny=T)
print(enumeration_ask('Raise', happy_and_sunny, happiness).show_approx())
print(elimination_ask('Raise', happy_and_sunny, happiness).show_approx())
# Example 5
    ])

'''
Diagnostic inference.  Refer to screen capture and/or turned in paper copy
for mathematical explanation.

The probability that a burglary occurs given that john and mary both call,
which depends on alarm and earthquake as evidence variables.
'''

# P(Burglary | John and Mary both call), via each algorithm from AIMA
# Section 14.4: exact enumeration, variable elimination (a dynamic-
# programming version of enumeration), and the sampling approximations.
print("\nP(Burglary | John and Mary both call)")
calls_evidence = dict(JohnCalls=T, MaryCalls=T)
print(enumeration_ask('Burglary', calls_evidence, burglary).show_approx())
print(elimination_ask('Burglary', calls_evidence, burglary).show_approx())
print(gibbs_ask('Burglary', calls_evidence, burglary).show_approx())
print(rejection_sampling('Burglary', calls_evidence, burglary).show_approx())
print(likelihood_weighting('Burglary', calls_evidence, burglary).show_approx())

'''
Causal inference.  Refer to screen capture and/or turned in paper copy for
mathematical explanation.

The probability that the alarm goes off given that a burglary occurs and
there is no earthquake.
'''

# Compute P(Alarm | burglary ∧ ¬earthquake)
# FIX: the printed label was missing its closing parenthesis.
print("\nP(Alarm | burglary ∧ ¬earthquake)")
print(enumeration_ask('Alarm', dict(Burglary=T, Earthquake=F), burglary).show_approx())
# elimination_ask() is a dynamic programming version of enumeration_ask().
print(elimination_ask('Alarm', dict(Burglary=T, Earthquake=F), burglary).show_approx())