예제 #1
0
def run_loopyBP(sg_model, maxiter, updates="SEQRND", damping=None):
    """Estimate ln(Z) of a spin-glass model with loopy belief propagation.

    Args:
        sg_model: spin-glass model accepted by
            build_libdaiFactorGraph_from_SpinGlassModel.
        maxiter: maximum number of BP iterations; None falls back to the
            module-level LIBDAI_LBP_ITERS default.
        updates: libDAI BP update schedule (e.g. "SEQRND", "PARALL").
        damping: optional damping factor forwarded to libDAI when given.

    Returns:
        The log partition function estimate, bp.logZ().
    """
    if maxiter is None:
        maxiter = LIBDAI_LBP_ITERS

    sg_FactorGraph = build_libdaiFactorGraph_from_SpinGlassModel(
        sg_model, fixed_variables={})

    # Write factorgraph to a file
    sg_FactorGraph.WriteToFile('sg_temp.fg')

    # Store solver options in a PropertySet; libDAI expects string values.
    tol = 1e-9
    verb = 1
    opts = dai.PropertySet()
    opts["maxiter"] = str(maxiter)  # Maximum number of iterations
    opts["tol"] = str(tol)  # Tolerance for convergence
    opts["verbose"] = str(verb)  # Verbosity (amount of output generated)

    ##################### Run Loopy Belief Propagation #####################
    # Configure BP-specific properties: the update schedule, log-domain
    # message passing, and (optionally) damping. Note bpopts aliases opts.
    bpopts = opts
    bpopts["updates"] = updates
    bpopts["logdomain"] = "1"
    if damping is not None:
        # libDAI property values are strings; coerce numeric damping factors
        # for consistency with the other options above.
        bpopts["damping"] = str(damping)

    bp = dai.BP(sg_FactorGraph, bpopts)
    # Initialize belief propagation algorithm
    bp.init()
    # Run belief propagation algorithm
    bp.run()

    # Log partition sum of sg_FactorGraph, approximated by loopy BP.
    ln_z_estimate = bp.logZ()
    return ln_z_estimate
예제 #2
0
 def get_inf_bp(self, logdomain=False, verbose=False):
     """Build a libDAI belief-propagation object for this model's factors.

     Args:
         logdomain: run BP with log-domain messages when True.
         verbose: enable libDAI's verbose output when True.

     Returns:
         A dai.BP instance over self.vecfac with SEQFIX updates and
         tolerance 1e-9.
     """
     graph = dai.FactorGraph(self.vecfac)
     options = dai.PropertySet()
     options["tol"] = "1e-9"
     options["logdomain"] = "1" if logdomain else "0"
     options["updates"] = "SEQFIX"
     options["verbose"] = "1" if verbose else "0"
     return dai.BP(graph, options)
예제 #3
0
def run_inference(sg_model):
    """Run loopy BP, tree-reweighted BP, and the junction tree algorithm on a
    spin-glass model, printing each method's (approximate or exact) log
    partition sum.

    Side effects: writes the factor graph to 'sg_temp.fg' and prints results.
    Returns None.
    """
    sg_FactorGraph = build_libdaiFactorGraph_from_SpinGlassModel(
        sg_model, fixed_variables={})
    # sg_FactorGraph = build_graph_from_clique_ising_model(sg_model, fixed_variables={})

    # Write factorgraph to a file
    sg_FactorGraph.WriteToFile('sg_temp.fg')
    print('spin glass factor graph written to sg_temp.fg')

    # Output some information about the factorgraph
    print(sg_FactorGraph.nrVars(), 'variables')
    print(sg_FactorGraph.nrFactors(), 'factors')

    # Set some constants
    maxiter = 10000
    tol = 1e-9
    verb = 1
    # Store the constants in a PropertySet object (libDAI takes string values)
    opts = dai.PropertySet()
    opts["maxiter"] = str(maxiter)  # Maximum number of iterations
    opts["tol"] = str(tol)  # Tolerance for convergence
    opts["verbose"] = str(
        verb
    )  # Verbosity (amount of output generated)

    ##################### Run Loopy Belief Propagation #####################
    print()
    print('-' * 80)
    # Construct a BP (belief propagation) object from the FactorGraph sg_FactorGraph
    # using the parameters specified by opts and two additional properties,
    # specifying the type of updates the BP algorithm should perform and
    # whether they should be done in the real or in the logdomain
    # NOTE(review): bpopts aliases opts (same PropertySet object), so keys set
    # here persist into trwbp_opts/jtopts below — presumably libDAI tolerates
    # the extra properties; confirm before refactoring to independent copies.
    bpopts = opts
    bpopts["updates"] = "SEQRND"
    bpopts["logdomain"] = "1"

    bp = dai.BP(sg_FactorGraph, bpopts)
    # Initialize belief propagation algorithm
    bp.init()
    # Run belief propagation algorithm
    bp.run()

    # Report log partition sum of sg_FactorGraph, approximated by the belief propagation algorithm
    print('Approximate (loopy belief propagation) log partition sum:',
          bp.logZ())

    ##################### Run Tree Re-weighted Belief Propagation #####################
    print()
    print('-' * 80)
    # Construct a TRWBP (tree re-weighted belief propagation) object using
    # opts plus TRW-specific properties (update schedule, number of spanning
    # trees, log-domain messages).
    # NOTE(review): trwbp_opts also aliases opts — see note above.
    trwbp_opts = opts
    trwbp_opts["updates"] = "SEQRND"
    trwbp_opts["nrtrees"] = "10"
    trwbp_opts["logdomain"] = "1"

    trwbp = dai.TRWBP(sg_FactorGraph, trwbp_opts)
    # trwbp = dai.FBP( sg_FactorGraph, trwbp_opts )

    # Initialize belief propagation algorithm
    trwbp.init()
    # Run belief propagation algorithm, timing the run
    t0 = time.time()
    trwbp.run()
    t1 = time.time()

    # Report log partition sum of sg_FactorGraph, approximated by the belief propagation algorithm
    print(
        'Approximate (tree re-weighted belief propagation) log partition sum:',
        trwbp.logZ())
    print('time =', t1 - t0)

    ##################### Run Junction Tree Algorithm #####################
    print()
    print('-' * 80)
    # Construct a JTree (junction tree) object from the FactorGraph sg_FactorGraph
    # using the parameters specified by opts and an additional property
    # that specifies the type of updates the JTree algorithm should perform
    jtopts = opts
    jtopts["updates"] = "HUGIN"
    jt = dai.JTree(sg_FactorGraph, jtopts)
    # Initialize junction tree algorithm
    jt.init()
    # Run junction tree algorithm
    jt.run()

    # Construct another JTree (junction tree) object that is used to calculate
    # the joint configuration of variables that has maximum probability (MAP state)
    jtmapopts = opts
    jtmapopts["updates"] = "HUGIN"
    jtmapopts["inference"] = "MAXPROD"
    jtmap = dai.JTree(sg_FactorGraph, jtmapopts)
    # Initialize junction tree algorithm
    jtmap.init()
    # Run junction tree algorithm
    jtmap.run()
    # Calculate joint state of all variables that has maximum probability
    # (computed but not printed or returned here)
    jtmapstate = jtmap.findMaximum()
    # Report log partition sum (normalizing constant) of sg_FactorGraph, calculated by the junction tree algorithm
    print()
    print('-' * 80)
    print('Exact log partition sum:', jt.logZ())
예제 #4
0
        jtmap = dai.JTree( fg, jtmapopts )
        # Initialize junction tree algorithm
        jtmap.init()
        # Run junction tree algorithm
        jtmap.run()
        # Calculate joint state of all variables that has maximum probability
        jtmapstate = jtmap.findMaximum()

        # Construct a BP (belief propagation) object from the FactorGraph fg
        # using the parameters specified by opts and two additional properties,
        # specifying the type of updates the BP algorithm should perform and
        # whether they should be done in the real or in the logdomain
        bpopts = opts
        bpopts["updates"] = "SEQRND"
        bpopts["logdomain"] = "0"
        bp = dai.BP( fg, bpopts )
        # Initialize belief propagation algorithm
        bp.init()
        # Run belief propagation algorithm
        bp.run()

        # Construct a BP (belief propagation) object from the FactorGraph fg
        # using the parameters specified by opts and two additional properties,
        # specifying the type of updates the BP algorithm should perform and
        # whether they should be done in the real or in the logdomain
        #
        # Note that inference is set to MAXPROD, which means that the object
        # will perform the max-product algorithm instead of the sum-product algorithm
        mpopts = opts
        mpopts["updates"] = "SEQRND"
        mpopts["logdomain"] = "0"