Example #1
def fit(model, df, methodtype='bayes', verbose=3):
    """Learn the parameters given the DAG and data.

    Description
    -----------
    Maximum Likelihood Estimation
        A natural estimate for the CPDs is to simply use the *relative frequencies*,
        with which the variable states have occurred. If we observed `sprinkler=on` in 50 out of
        100 samples with `cloudy=True`, we might guess that about `50%` of the `cloudy` cases have
        the `sprinkler` on.
        According to MLE, we should fill the CPDs in such a way that $P(\text{data}|\text{model})$ is maximal.
        This is achieved when using the *relative frequencies*.
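
        For instance, a minimal toy sketch of the relative-frequency idea (the data
        here is illustrative, not part of this function's API):

        >>> import pandas as pd
        >>> toy = pd.DataFrame({'Cloudy': [1, 1, 1, 0], 'Sprinkler': [0, 1, 1, 1]})
        >>> counts = toy.groupby('Cloudy')['Sprinkler'].value_counts().unstack(fill_value=0)
        >>> counts.div(counts.sum(axis=1), axis=0)  # each row is P(Sprinkler | Cloudy)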

        While very straightforward, the ML estimator has the problem of *overfitting* to the data.
        If the observed data is not representative of the underlying distribution, ML estimates will be extremely far off.
        When estimating parameters for Bayesian networks, lack of data is a frequent problem.
        Even if the total sample size is very large, the fact that state counts are done conditionally
        for each parent configuration causes immense fragmentation.
        If a variable has 3 parents that can each take 10 states, then state counts will
        be done separately for `10^3 = 1000` parent configurations.
        This makes MLE very fragile and unstable for learning Bayesian Network parameters.
        A way to mitigate MLE's overfitting is *Bayesian Parameter Estimation*.

    Bayesian Parameter Estimation
        The Bayesian Parameter Estimator starts with already existing prior CPDs,
        that express our beliefs about the variables *before* the data was observed.
        Those "priors" are then updated, using the state counts from the observed data.

        One can think of the priors as consisting of *pseudo state counts* that are added
        to the actual counts before normalization. Unless one wants to encode specific beliefs
        about the distributions of the variables, one commonly chooses uniform priors,
        i.e. ones that deem all states equiprobable.

        A very simple prior is the so-called *K2* prior, which simply adds `1` to the count of every single state.
        A somewhat more sensible choice of prior is *BDeu* (Bayesian Dirichlet equivalent uniform prior).
        For BDeu we need to specify an *equivalent sample size* `N` and then the pseudo-counts are
        the equivalent of having observed `N` uniform samples of each variable (and each parent configuration).
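
        A toy sketch of the pseudo-count idea (the numbers are made up for illustration):

        >>> import numpy as np
        >>> counts = np.array([3, 0])                  # observed state counts
        >>> (counts + 1) / (counts + 1).sum()          # K2 prior: add 1 per state
        array([0.8, 0.2])
        >>> N = 10                                     # BDeu equivalent sample size
        >>> (counts + N / 2) / (counts + N / 2).sum()  # uniform pseudo-counts summing to N
        array([0.61538462, 0.38461538])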

    Parameters
    ----------
    model : dict
        Contains a model object with a key 'adjmat' (adjacency matrix).
    df : pd.DataFrame
        Pandas DataFrame containing the data.
    methodtype : str, (default: 'bayes')
        Strategy for parameter learning.
        Options are: 'ml' or 'maximumlikelihood' for Maximum Likelihood Estimation, or 'bayes' for Bayesian Parameter Estimation.
    verbose : int, optional
        Print progress to screen. The default is 3.
            * 0: NONE
            * 1: ERROR
            * 2: WARNING
            * 3: INFO (default)
            * 4: DEBUG
            * 5: TRACE

    Returns
    -------
    dict
        Dictionary containing the fitted model ('model'), the adjacency matrix ('adjmat') and the config ('config').

    Examples
    --------
    >>> import bnlearn as bn
    >>>
    >>> df = bn.import_example()
    >>> model = bn.import_DAG('sprinkler', CPD=False)
    >>>
    >>> # Parameter learning
    >>> model_update = bn.parameter_learning.fit(model, df)
    >>> bn.plot(model_update)
    >>>
    >>> # LOAD BIF FILE
    >>> model = bn.import_DAG('alarm')
    >>> df = bn.sampling(model, n=1000)
    >>> model_update = bn.parameter_learning.fit(model, df)
    >>> G = bn.plot(model_update)

    """
    config = {}
    config['verbose'] = verbose
    config['method'] = methodtype
    adjmat = model['adjmat']

    # Check whether all labels in the adjacency matrix are present in the dataframe
    # adjmat, model = _check_adjmat(model, df)
    df = _filter_df(adjmat, df, verbose=config['verbose'])

    if config['verbose'] >= 3:
        print('[BNLEARN][PARAMETER LEARNING] Computing parameters using [%s]' %
              (config['method']))
    # Extract model
    if isinstance(model, dict):
        model = model['model']

    # Convert to BayesianModel
    if 'BayesianModel' not in str(type(model)):
        model = to_BayesianModel(adjmat, verbose=config['verbose'])

    # pe = ParameterEstimator(model, df)
    # print("\n", pe.state_counts('Cloudy'))
    # print("\n", pe.state_counts('Sprinkler'))

    # Learning CPDs using Maximum Likelihood Estimation: fit the model in place
    # so the returned model contains the learned CPDs.
    if config['method'] == 'ml' or config['method'] == 'maximumlikelihood':
        model.fit(df, estimator=MaximumLikelihoodEstimator)
        for cpd in model.get_cpds():
            if config['verbose'] >= 3:
                print("CPD of {variable}:".format(variable=cpd.variable))
                print(cpd)

    # Learning CPDs using Bayesian Parameter Estimation
    elif config['method'] == 'bayes':
        model.fit(df,
                  estimator=BayesianEstimator,
                  prior_type="BDeu",
                  equivalent_sample_size=1000)

        for cpd in model.get_cpds():
            if config['verbose'] >= 3:
                print("CPD of {variable}:".format(variable=cpd.variable))
                print(cpd)

    out = {}
    out['model'] = model
    out['adjmat'] = adjmat
    out['config'] = config

    return out
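

# Usage sketch (names follow the docstring examples above): compare the two
# estimation strategies on the same data. The DAG is re-imported for each call
# so that each run starts from unfitted CPDs.
import bnlearn as bn

df = bn.import_example()
model_mle = bn.parameter_learning.fit(bn.import_DAG('sprinkler', CPD=False), df, methodtype='ml')
model_bayes = bn.parameter_learning.fit(bn.import_DAG('sprinkler', CPD=False), df, methodtype='bayes')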
Example #2
def fit(model, variables=None, evidence=None, verbose=3):
    """Inference using using Variable Elimination.

    Parameters
    ----------
    model : dict
        Contains the model and the adjacency matrix (e.g. the output of import_DAG or parameter_learning.fit).
    variables : List, optional
        For exact inference, P(variables | evidence). The default is None.
            * ['Name_of_node_1']
            * ['Name_of_node_1', 'Name_of_node_2']
    evidence : dict, optional
        For exact inference, P(variables | evidence). The default is None.
            * {'Rain':1}
            * {'Rain':1, 'Sprinkler':0, 'Cloudy':1}
    verbose : int, optional
        Print progress to screen. The default is 3.
            * 0: NONE
            * 1: ERROR
            * 2: WARNING
            * 3: INFO (default)
            * 4: DEBUG
            * 5: TRACE

    Returns
    -------
    q : pgmpy factor
        The inference result, containing the probabilities P(variables | evidence).

    Examples
    --------
    >>> import bnlearn as bn
    >>>
    >>> # Load example data
    >>> model = bn.import_DAG('sprinkler')
    >>> bn.plot(model)
    >>>
    >>> # Do the inference
    >>> q1 = bn.inference.fit(model, variables=['Wet_Grass'], evidence={'Rain':1, 'Sprinkler':0, 'Cloudy':1})
    >>> q2 = bn.inference.fit(model, variables=['Wet_Grass','Rain'], evidence={'Sprinkler':1})

    """
    if not isinstance(model, dict):
        raise Exception(
            '[bnlearn] >Error: Input requires a DAG that contains the key: model.'
        )
    adjmat = model['adjmat']
    if variables is not None and not np.all(np.isin(variables, adjmat.columns)):
        raise Exception(
            '[bnlearn] >Error: [variables] should match names in the model (Case sensitive!)'
        )
    if evidence is not None and not np.all(np.isin(list(evidence.keys()), adjmat.columns)):
        raise Exception(
            '[bnlearn] >Error: [evidence] should match names in the model (Case sensitive!)'
        )
    if verbose >= 3: print('[bnlearn] >Variable Elimination..')

    # Extract model
    if isinstance(model, dict):
        model = model['model']

    # Check BayesianModel
    if 'BayesianModel' not in str(type(model)):
        if verbose >= 1:
            print(
                '[bnlearn] >Warning: Inference requires a BayesianModel with learned CPDs. Hint: try parameter_learning.fit(DAG, df, methodtype="bayes")'
            )
        return None

    try:
        model_infer = VariableElimination(model)
    except Exception:
        raise Exception(
            '[bnlearn] >Error: Input model does not contain learned CPDs. hint: did you run parameter_learning.fit?'
        )

    # Run exact inference for P(variables | evidence).
    q = model_infer.query(variables=variables,
                          evidence=evidence,
                          show_progress=(verbose > 0))
    if verbose >= 3: print(q)
    # for varname in variables: print(q[varname])
    return q
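

# Usage sketch for reading the result (names follow the docstring examples above;
# assumes pgmpy's DiscreteFactor interface, where the probabilities returned by
# VariableElimination.query live in `.values`):
import bnlearn as bn

model = bn.import_DAG('sprinkler')
q = bn.inference.fit(model, variables=['Wet_Grass'], evidence={'Rain': 1})
print(q.values)  # posterior probability for each state of Wet_Grass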