# NOTE(review): this chunk opens part-way through the `opt_pattern` option
# dictionary -- its opening brace and earlier entries are above this view.
        'default': 0.2},
    '-t': {'name': 'timeout', 'type': 'int', 'default': 200}}

# Parse command-line flags (everything after the factor-graph path in argv[1]).
arg = ArgParser(sys.argv[2:], opt_pattern)
opt = arg.read()

# Echo the effective option values (Python 2 print statements).
for o in opt:
    print "\t", o, opt[o]
print

# Load the factor graph from argv[1] together with its function set.
fg = FactorGraph()
func = Functions1(fg)
fg.load(sys.argv[1], func)
print 'Factor graph loaded.'

# One Agent per variable node; each agent is handed the shared `agents` dict
# (still being populated as the loop runs).
agents = {}
for v in fg.vars:
    agents[v] = Agent(v, fg, opt, agents)
print "Number of agents:", len(agents)

# Hand control to the scheduler.
sch = Scheduler(agents, fg, opt)
sch.init()
# NOTE(review): this chunk opens part-way through the `opt_pattern` option
# dictionary -- its opening brace and earlier entries are above this view.
    },
    '-c': {'name': 'convergence', 'type': 'int', 'default': 5},
    '-g': {'name': 'global_state', 'type': 'bool', 'default': False}}

# Parse command-line flags (everything after the factor-graph path in argv[1]).
arg = ArgParser(sys.argv[2:], opt_pattern)
opt = arg.read()

# Load the factor graph from the path given as the first CLI argument.
fg = FactorGraph(opt)
fg.load(sys.argv[1])

ms = MessageServer(opt)

# One Agent per variable node, keyed by the variable.
agents = {}
for v in fg.variables:
    agent = Agent(v, fg, ms, opt)
    agents[v] = agent

# The Mentor drives the agents through their full lifecycle.
mentor = Mentor(agents, fg, ms, opt)
mentor.initialize()
mentor.run()
mentor.terminate()
from factor_graph import FactorGraph
from node import VarNode, FactorNode
import numpy as np

# Build a 3-variable cyclic factor graph (a, b, c) and save it as a figure.
fg = FactorGraph()

# Variable nodes; 'c' gets an explicit 3-entry initial message.
a = VarNode(name='a', graph=fg)
b = VarNode(name='b', graph=fg)
c = VarNode(name='c', init=[1., 1., 1.], graph=fg)

# Factor tables (2x2 for f1, 2x3 for f2 and f3), inlined into the constructors.
f1 = FactorNode(cpd=np.array([[2, 3], [6, 4]]), graph=fg, name='f1',
                ordered_variables=(a, b))
f2 = FactorNode(cpd=np.array([[7, 2, 3], [1, 5, 2]]), graph=fg, name='f2',
                ordered_variables=(a, c))
f3 = FactorNode(cpd=np.array([[7, 9, 3], [6, 4, 2]]), graph=fg, name='f3',
                ordered_variables=(b, c))

fg.add_var_nodes([a, b, c])
fg.add_factor_nodes([f1, f2, f3])

# Wire up variable-factor adjacency.
for left, right in ((a, f1), (f1, b), (b, f3), (f3, c), (a, f2), (f2, c)):
    fg.add_edge(left, right)

# Fixed node layout for the rendered figure.
pos = {a: (-3, 0), b: (0, 3), c: (3, 0),
       f1: (-1.5, 1.5), f2: (0, 0), f3: (1.5, 1.5)}
fg.save_graph_fig(num=1, pos=pos)
import sys, json
from argparser import ArgParser
from factor_graph import FactorGraph
from scheduler import Scheduler
from variableagent import VariableAgent
from functionagent import FunctionAgent
from messageserver import MessageServer

# Command-line options: everything after the factor-graph path (argv[1]).
opt_pattern = {
    '-l': {'name': 'lambda', 'type': 'int', 'default': 10},
}
opt = ArgParser(sys.argv[2:], opt_pattern).read()

# Load the factor graph from the file named on the command line.
fg = FactorGraph(opt)
fg.load(sys.argv[1])

ms = MessageServer(opt)

# One agent per variable node and one per function node, all in one table.
agents = {}
for v in fg.variables:
    agents[v] = VariableAgent(v, fg, ms, opt)
for f in fg.functions:
    agents[f] = FunctionAgent(f, fg, ms, opt)

scheduler = Scheduler(fg, agents, ms, opt)
def test(skip_our_model=False):
    """Evaluate ln(Z) estimators on the test dataset and plot signed errors.

    Compares the network `lbp_net` against two loopy-BP baselines (libdai and
    mrftools) recomputed per problem, prints per-problem estimates and summary
    error figures, then saves a scatter plot of exact ln(Z) vs
    (estimate - exact) under ROOT_DIR/plots/.

    Args:
        skip_our_model: when True, skip the `lbp_net` forward pass and only
            evaluate/plot the two LBP baselines.

    Relies on module-level globals: lbp_net, TEST_TRAINED_MODEL, MODEL_NAME,
    TRAINED_MODELS_DIR, TEST_DATSET, MSG_PASSING_ITERS, parameters, ROOT_DIR.
    """
    if TEST_TRAINED_MODEL:
        # Restore trained weights before evaluation.
        lbp_net.load_state_dict(torch.load(TRAINED_MODELS_DIR + MODEL_NAME))
        # lbp_net.load_state_dict(torch.load(TRAINED_MODELS_DIR + "simple_4layer_firstWorking.pth"))
        # lbp_net.load_state_dict(torch.load(TRAINED_MODELS_DIR + "trained39non90_2layer.pth"))
    lbp_net.eval()

    sg_data, spin_glass_problems_SGMs = get_dataset(dataset_type=TEST_DATSET)
    # batch_size=1: one problem per iteration, so `idx` also indexes
    # spin_glass_problems_SGMs directly.
    data_loader = DataLoader(sg_data, batch_size=1)
    loss_func = torch.nn.MSELoss()

    # Signed errors (estimate - exact) per estimator, plus squared losses.
    exact_solution_counts = []
    GNN_estimated_counts = []
    LBPlibdai_estimated_counts = []
    LBPmrftools_estimated_counts = []
    losses = []
    lbp_losses = []
    mrftool_lbp_losses = []
    for idx, (spin_glass_problem, exact_ln_partition_function, libdai_lbp_Z_est,
              mrftools_lbp_Z_estimate) in enumerate(data_loader):
        # spin_glass_problem.compute_bethe_free_energy()
        sg_problem_SGM = spin_glass_problems_SGMs[idx]
        if not skip_our_model:
            spin_glass_problem = FactorGraph.init_from_dictionary(
                spin_glass_problem, squeeze_tensors=True)
            # spin_glass_problem = spin_glass_problem.to(device)
            # exact_ln_partition_function = exact_ln_partition_function.to(device)
            estimated_ln_partition_function = lbp_net(spin_glass_problem)
            GNN_estimated_counts.append(
                estimated_ln_partition_function.item() - exact_ln_partition_function)
            loss = loss_func(estimated_ln_partition_function,
                             exact_ln_partition_function.float().squeeze())
            losses.append(loss.item())

        # Recompute both LBP baselines here rather than trusting the cached
        # dataset estimates (the cached alternatives are kept commented below).
        libdai_lbp_Z_recompute = sg_problem_SGM.loopyBP_libdai()
        mrftools_lbp_Z_recompute = sg_problem_SGM.loopyBP_mrftools()
        LBPlibdai_estimated_counts.append(
            libdai_lbp_Z_recompute - exact_ln_partition_function)
        LBPmrftools_estimated_counts.append(
            mrftools_lbp_Z_recompute - exact_ln_partition_function)
        # LBPlibdai_estimated_counts.append(libdai_lbp_Z_est)
        # LBPmrftools_estimated_counts.append(mrftools_lbp_Z_estimate)
        exact_solution_counts.append(exact_ln_partition_function)

        # print("libdai_lbp_Z_recompute:", libdai_lbp_Z_recompute)
        # print("libdai_lbp_Z_est:", libdai_lbp_Z_est)
        libdai_lbp_loss = loss_func(torch.tensor(libdai_lbp_Z_recompute),
                                    exact_ln_partition_function.float().squeeze())
        lbp_losses.append(libdai_lbp_loss.item())
        mrftools_lbp_loss = loss_func(torch.tensor(mrftools_lbp_Z_recompute),
                                      exact_ln_partition_function.float().squeeze())
        mrftool_lbp_losses.append(mrftools_lbp_loss.item())

        print("libdai lbp estimated_ln_partition_function:", libdai_lbp_Z_recompute)
        print("mrf tools lbp estimated_ln_partition_function:", mrftools_lbp_Z_recompute)
        if not skip_our_model:
            print("GNN estimated_ln_partition_function:", estimated_ln_partition_function)
        print("exact_ln_partition_function:", exact_ln_partition_function)
        print()

    # NOTE(review): labelled "MSE" but np.sqrt(np.mean(...)) is RMSE.
    print("LBP libdai MSE:", np.sqrt(np.mean(lbp_losses)))
    print("LBP mrftools MSE:", np.sqrt(np.mean(mrftool_lbp_losses)))
    print("GNN MSE:", np.sqrt(np.mean(losses)))

    # Sorted ascending so the `[:-10]` slices below drop the 10 largest losses.
    losses.sort()
    mrftool_lbp_losses.sort()
    lbp_losses.sort()

    if not skip_our_model:
        plt.plot(
            exact_solution_counts, GNN_estimated_counts, 'x', c='g',
            label='GNN estimate, %d iters, RMSE=%.2f, 10 lrgst removed RMSE=%.2f' %
            (MSG_PASSING_ITERS, np.sqrt(np.mean(losses)),
             np.sqrt(np.mean(losses[:-10]))))
    plt.plot(
        exact_solution_counts, LBPmrftools_estimated_counts, '+', c='r',
        label='LBP mrftools, %d iters, RMSE=%.2f, 10 lrgst removed RMSE=%.2f' %
        (parameters.MRFTOOLS_LBP_ITERS, np.sqrt(np.mean(mrftool_lbp_losses)),
         np.sqrt(np.mean(mrftool_lbp_losses[:-10]))))
    plt.plot(
        exact_solution_counts, LBPlibdai_estimated_counts, 'x', c='b',
        label='LBP libdai, %d iters, RMSE=%.2f, 10 lrgst removed RMSE=%.2f' %
        (parameters.LIBDAI_LBP_ITERS, np.sqrt(np.mean(lbp_losses)),
         np.sqrt(np.mean(lbp_losses[:-10]))))
    # Horizontal zero line = a perfect estimate.
    plt.plot([min(exact_solution_counts), max(exact_solution_counts)], [0, 0],
             '-', c='g', label='Exact')

    # plt.axhline(y=math.log(2)*log_2_Z[PROBLEM_NAME], color='y', label='Ground Truth ln(Set Size)')
    plt.xlabel('ln(Exact Model Count)', fontsize=14)
    plt.ylabel('ln(Estimated Model Count) - ln(Exact Model Count)', fontsize=14)
    plt.title('Exact Model Count vs. Estimates', fontsize=20)
    # plt.legend(fontsize=8, loc=2, prop={'size': 6})
    plt.legend(fontsize=12, prop={'size': 8})
    # make the font bigger
    matplotlib.rcParams.update({'font.size': 10})

    plt.grid(True)
    # Shrink current axis's height by 10% on the bottom
    # box = ax.get_position()
    # ax.set_position([box.x0, box.y0 + box.height * 0.1,
    #                  box.width, box.height * 0.9])
    # fig.savefig('/Users/jkuck/Downloads/temp.png', bbox_extra_artists=(lgd,), bbox_inches='tight')

    # Ensure the output directory exists before writing the figure.
    if not os.path.exists(ROOT_DIR + 'plots/'):
        os.makedirs(ROOT_DIR + 'plots/')

    # plot_name = 'trained=%s_%s_%diters_%d_%d_%.2f_%.2f.png' % (TEST_TRAINED_MODEL, TEST_DATSET, MSG_PASSING_ITERS, N_MIN, N_MAX, F_MAX, C_MAX)
    plot_name = 'trained=%s_dataset=%s%d_%diters_alpha%f.png' % (
        TEST_TRAINED_MODEL, TEST_DATSET, len(data_loader), MSG_PASSING_ITERS,
        parameters.alpha)
    plt.savefig(ROOT_DIR + 'plots/' + plot_name)
print 'Reading options...' opt_pattern = {'-e': {'name': 'episodes', 'type': 'int', 'default': 200}, '--alpha': {'name': 'alpha', 'type': 'float', 'default': 0.9}, '--gamma': {'name': 'gamma', 'type': 'float', 'default': 0.9}, '--epsilon': {'name': 'epsilon', 'type': 'float', 'default': 0.2}, '-t': {'name': 'timeout', 'type': 'int', 'default': 200} } arg = ArgParser(sys.argv[2:], opt_pattern) opt = arg.read() for o in opt: print "\t",o, opt[o] print fg = FactorGraph() func = Functions1(fg) fg.load(sys.argv[1], func) print 'Factor graph loaded.' agents = {} for v in fg.vars: agents[v] = Agent(v, fg, opt, agents) print "Number of agents:", len(agents) sch = Scheduler(agents, fg, opt) sch.init()
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from math import exp, sqrt
import sys, json
from functions import Functions
from pareto import get_pareto_frontier_by_point
from factor_graph import FactorGraph

# Load the run log named on the command line, then rebuild its factor graph.
with open(sys.argv[1], 'r') as log_file:
    log = json.loads(log_file.read())
fg = FactorGraph(log['opt'])
fg.load('fg.json')
func = Functions(fg)

# Collect the three objective values along the logged search path.
xp, yp, zp = [], [], []
for entry in log['path_log']:
    fs = entry['functions']
    xp.append(fs['f1'])
    yp.append(fs['f2'])
    zp.append(fs['f3'])

# 3-D scatter of the path, each point labelled with its step index.
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(xp, yp, zp, c="r", s=30)
for i, (x, y, z) in enumerate(zip(xp, yp, zp)):
    ax.text(x, y, z, '%s' % (str(i)))
# NOTE(review): this chunk opens inside a message-update function -- its `def`
# line and the earlier `dimension` branches are above this view, so the
# indentation below is reconstructed.
    elif dimension == 3:
        # Choose which slice of `state` multiplies the incoming message based
        # on the relative ordering of the two node ids.
        if recipient_id > from_node_id:
            outgoing_message = np.dot(np.array(state[0]), np.array(messages[from_node_id]))
        else:
            outgoing_message = np.dot(np.array(state[1]), np.array(messages[from_node_id]))
    return outgoing_message


# Message-update callbacks handed to the FactorGraph runtime.
function_list = [
    sum_product_update_fac, sum_product_update_var, normalize_message
]

# Solver configuration (key semantics are defined by FactorGraph; values per
# the key names: sum-product over redis pub/sub, asynchronous, 20 iterations).
config = {
    "algorithm": "sum_product",
    "pubsub_choice": "redis",
    "synchronous": "asynchronous",
    "number_of_iter": 20,
    "time_till_stop": 20,
    "verbose": True
}

# Build the graph from the example input file, run it, print the result.
path_to_input_file = "examples/hmm_simple_factor_graph_ver_7_new_ui.txt"
fg = FactorGraph(path_to_input_file, config, function_list)
fg.run()
fg.print_solution()
from factor_graph import FactorGraph
from node import VarNode, FactorNode
import numpy as np

# Small factor graph: x1 -fa- x2, x2 -fb- x3, x2 -fc- x4.
fg = FactorGraph()
params = {'graph': fg}

x1, x2, x3, x4 = [VarNode(name=n, **params) for n in ('x1', 'x2', 'x3', 'x4')]

# Pairwise 2x2 factor tables, inlined into the constructors.
fa = FactorNode(np.array([[3, 4], [3, 9]]), (x1, x2), name='fa', **params)
fb = FactorNode(np.array([[3, 4], [5, 1]]), (x2, x3), name='fb', **params)
fc = FactorNode(np.array([[7, 8], [3, 9]]), (x2, x4), name='fc', **params)

fg.add_var_nodes([x1, x2, x3, x4])
fg.add_factor_nodes([fa, fb, fc])

# Wire up variable-factor adjacency.
for left, right in ((x1, fa), (fa, x2), (x2, fb), (fb, x3), (x2, fc), (fc, x4)):
    fg.add_edge(left, right)

print(fg.nodes)
print(fg.neighbors(x4))
from factor_graph import FactorGraph
from node import VarNode, FactorNode
import numpy as np

# Factor graph over four variables; 'c' gets an explicit 3-entry init message.
fg = FactorGraph()
a = VarNode(name='a', graph=fg)
b = VarNode(name='b', graph=fg)
c = VarNode(name='c', init=[1., 1., 1.], graph=fg)
d = VarNode(name='d', graph=fg)

# f1: pairwise over (a, b); f2: rank-3 table over (b, d, c); f3: unary over c.
f1 = FactorNode(cpd=np.array([[2, 3], [6, 4]]), graph=fg, name='f1',
                ordered_variables=(a, b))
f2 = FactorNode(cpd=np.array([[[7, 2, 3], [1, 5, 2]], [[8, 3, 9], [6, 4, 2]]]),
                graph=fg, name='f2', ordered_variables=(b, d, c))
f3 = FactorNode(cpd=np.array([5, 1, 9]), graph=fg, name='f3',
                ordered_variables=(c, ))

fg.add_var_nodes([a, b, c, d])
fg.add_factor_nodes([f1, f2, f3])

# Wire up variable-factor adjacency.
for left, right in ((a, f1), (f1, b), (b, f2), (f2, c), (f2, d), (c, f3)):
    fg.add_edge(left, right)

# Run sum-product targeting node b.
fg.sum_product(node=b)
from functionssg import Functions from factor_graph import FactorGraph import sys, json fg = FactorGraph({'global_state': False}) fg.load('fgsg.json') func = Functions(fg) log = json.loads(open(sys.argv[1], 'r').read()) for p in log['path_log'][-1]['pareto']: values = {} i = 0 for v in fg.variables: values[v] = p[0][i] i += 1 zero = 0 fv = [] for i in range(20): v = func.calculate('v%d' % i, values) if v == 0: zero += 1 fv.append(str(v)) print 'zeros: %d' % zero print ', '.join(fv) print