T = 16
lr = 0.5
its = 1500
fix_mix_its = int(its * 0.5)
logging_itv = 50

utils.set_log_potential_funs(g.factors_list, skip_existing=True)  # g factors' lpot_fun should still be None
# above will also set the lpot_fun in all the (completely unobserved) factors in cond_g

if algo_name in ('OSI', 'NPVI'):
    if cond:  # TODO: ugly; fix
        _g = cond_g
    else:
        _g = g
    if algo_name == 'OSI':
        vi = OneShot(g=_g, K=K, T=T, seed=seed)
    else:
        vi = NPVI(g=_g, K=K, T=T, isotropic_cov=False, seed=seed)
else:
    if cond:
        cg = CompressedGraphSorted(cond_g)
    else:
        # technically incorrect; currently we should run LOSI on the conditional MRF
        cg = CompressedGraphSorted(g)
    cg.run()
    print('number of rvs in cg', len(cg.rvs))
    print('number of factors in cg', len(cg.factors))
    if algo_name == 'LOSI':
        vi = LiftedOneShot(g=cg, K=K, T=T, seed=seed)
    else:
        vi = LiftedNPVI(g=cg, K=K, T=T, seed=seed)
def __init__(self, fileName, maxDuration, queue, prereqs=None):
    OneShot.__init__(self, fileName, maxDuration, prereqs)
    self.queue = queue
def stop(self, source, target, callbackFn):
    OneShot.stop(self, source, target, callbackFn)
    self.queue.put(self)
T = 10
# lr = 1e-1
lr = 5e-1
# its = 1000
its = 500
# fix_mix_its = int(its * 0.1)
fix_mix_its = int(its * 1.0)
# fix_mix_its = 500
logging_itv = 100
# cond = True
cond = True

if cond:
    cond_g.init_nb()  # this will make cond_g rvs' .nb attributes consistent (the baseline didn't care, so it was OK)

if cond:
    osi = OneShot(g=cond_g, K=K, T=T, seed=seed)
else:
    osi = OneShot(g=g, K=K, T=T, seed=seed)

if cond:
    # clean up in case someone needs to use rvs.nb in g later
    for i, rv in enumerate(g.rvs_list):
        rv.nb = g_rv_nbs[i]  # restore; undo possible mutation from cond_g.init_nb()

start_time = time.process_time()
osi.run(lr=lr, its=its, fix_mix_its=fix_mix_its, logging_itv=logging_itv)
# print('Mu =\n', osi.params['Mu'], '\nVar =\n', osi.params['Var'])
time_cost[name] = (time.process_time() - start_time) / num_test + time_cost.get(name, 0)
print(name, f'time {time.process_time() - start_time}')

err = []
pred = {}
for key in key_list:
# time_cost[name] = (time.process_time() - start_time) / num_runs / num_tests + time_cost.get(name, 0)
# print(name, f'time {time.process_time() - start_time}')
for i, rv in enumerate(rvs):
    mmap_res[algo, i] = bp.map(rv)

algo += 1
name = names[algo]
utils.set_log_potential_funs(g.factors_list)  # OSI assumes factors have callable .log_potential_fun
K = 2
T = 10
lr = 0.2
its = 10
fix_mix_its = int(its * 0.8)
osi = OneShot(g, K, T)  # can be moved outside of all loops if the ground MRF doesn't change
osi.run(its, lr=lr)
for i, rv in enumerate(rvs):
    mmap_res[algo, i] = osi.map(rv)

# ground truth
# first, need to convert factor.log_potential_funs to one of (LogTable, LogQuadratic, LogHybridQuadratic);
# this can be done by first converting the corresponding potentials to one of (TablePotential, QuadraticPotential,
# HybridQuadraticPotential), then calling the .to_log_potential method on the potential objects
# manual conversion here:
# factors[0].potential = HybridQuadraticPotential(
#     A=-factors[0].potential.w * np.array([np.array([[1., 0], [0, 0]]), np.array([[1., 0.], [0., 0.]])]),
#     b=-factors[0].potential.w * np.array([[-16., 0], [14., 0.]]),
#     c=-factors[0].potential.w * np.array([64., 49.])
# )
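# Illustrative sketch only (not part of the original script): the generic form of the conversion
# described in the comments above. It assumes every factor's .potential has already been replaced
# by a TablePotential / QuadraticPotential / HybridQuadraticPotential, and that the
# .to_log_potential method and the .log_potential_fun attribute behave as referenced elsewhere in
# these snippets.
for factor in g.factors_list:
    factor.log_potential_fun = factor.potential.to_log_potential()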
rel_g = RelationalGraph()
rel_g.atoms = (atom_friend, atom_smoke_x, atom_cancer)
rel_g.param_factors = (f1, f2)
rel_g.init_nb()
rel_g.data = data
g, rvs_table = rel_g.grounded_graph()
print(rvs_table)

from OneShot import OneShot
utils.set_log_potential_funs(g.factors_list)
K = 4
T = 8
osi = OneShot(g=g, K=K, T=T, seed=seed)
res = osi.run(lr=1e-1, its=200)

for key, rv in sorted(rvs_table.items()):
    if rv.value is None:  # only test non-evidence nodes
        print(key, osi.map(obs_rvs=[], query_rv=rv))

import matplotlib.pyplot as plt
record = res['record']
for key in record:
    plt.plot(record[key], label=key)
plt.legend(loc='best')
save_name = __file__.split('.py')[0]
plt.savefig('%s.png' % save_name)

# EPBP inference
for i, key in enumerate(key_list):
    res[i, :] -= ans[key]
avg_diff[name] = np.average(np.average(abs(res), axis=1)) / num_test + avg_diff.get(name, 0)
err_var[name] = np.average(np.average(res**2, axis=1)) / num_test + err_var.get(name, 0)
print(name, 'diff', np.average(np.average(abs(res), axis=1)))
print(name, 'var',
      np.average(np.average(res**2, axis=1)) - np.average(np.average(abs(res), axis=1))**2)

name = 'OSI'
res = np.zeros((len(key_list), 5))
osi = OneShot(g, num_mixtures=1, num_quadrature_points=2)  # can be moved outside of all loops, as the ground MRF doesn't change
for j in range(5):
    start_time = time.process_time()
    osi.run(200, lr=2)
    time_cost[name] = (time.process_time() - start_time) / 5 / num_test + time_cost.get(name, 0)
    print(name, f'time {time.process_time() - start_time}')
    for i, key in enumerate(key_list):
        res[i, j] = osi.map(rvs_table[key])

for i, key in enumerate(key_list):
    res[i, :] -= ans[key]
avg_diff[name] = np.average(np.average(abs(res), axis=1)) / num_test + avg_diff.get(name, 0)
err_var[name] = np.average(np.average(res**2, axis=1)) / num_test + err_var.get(name, 0)
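# Note on the error bookkeeping in this fragment: the printed 'var' is the spread of the
# absolute error, np.average(res**2) - np.average(abs(res))**2, computed over the
# len(key_list) x 5 result array, while avg_diff and err_var accumulate running averages
# across the num_test outer trials through the `+ dict.get(name, 0)` pattern.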
    for rv in g.rvs:
        if rv.value is not None:
            num_evidence += 1
    print('number of evidence', num_evidence)

    start_time = time.process_time()
    bp.run(20, log_enable=False)
    time_cost.append(time.process_time() - start_time)
    print('time lapse', time.process_time() - start_time)
    for idx, rv in enumerate(rvs_table[t - 1]):
        result[idx, i] = bp.map(rv)
elif algo == 'OSI':
    if cond:
        osi = OneShot(g=cond_g, K=K, T=T, seed=seed)
    else:
        osi = OneShot(g=g, K=K, T=T, seed=seed)
    start_time = time.process_time()
    osi.run(lr=lr, its=its, fix_mix_its=fix_mix_its, logging_itv=logging_itv)
    time_cost.append(time.process_time() - start_time)
    # print('Mu =\n', osi.params['Mu'], '\nVar =\n', osi.params['Var'])
    print(algo, f'time {time_cost[-1]}')
    for idx, rv in enumerate(rvs_table[t - 1]):
        if cond:
            result[idx, i] = osi.map(obs_rvs=[], query_rv=rv)
        else:
T = 16
lr = 0.5
its = 1500
fix_mix_its = int(its * 0.2)
logging_itv = 500

utils.set_log_potential_funs(g.factors_list, skip_existing=True)  # g factors' lpot_fun should still be None
# above will also set the lpot_fun in all the (completely unobserved) factors in cond_g

if algo_name in ('OSI', 'NPVI'):
    if cond:  # TODO: ugly; fix
        _g = cond_g
    else:
        _g = g
    if algo_name == 'OSI':
        vi = OneShot(g=_g, K=K, T=T, seed=test_seed)
    else:
        vi = NPVI(g=_g, K=K, T=T, isotropic_cov=False, seed=test_seed)
else:
    if cond:
        cg = CompressedGraphSorted(cond_g)
    else:
        # technically incorrect; currently we should run LOSI on the conditional MRF
        cg = CompressedGraphSorted(g)
    cg.run()
    print('number of rvs in cg', len(cg.rvs))
    print('number of factors in cg', len(cg.factors))
print(params)
if 'pi' in params:
    print(w @ params['pi'])
if 'mu' in params:
    print(w @ params['mu'])

import matplotlib.pyplot as plt
record = res['record']
# for key in record:
#     plt.plot(record[key], label=key)
plt.plot(record['bfe'], label='bfe (with lifting)')

print('no lifting')
osi = OneShot(g=g, K=K, T=T, seed=seed)
res = osi.run(lr=lr, its=its)
w = res['w']
for rv in sorted(g.rvs):
    print(rv)
    params = rv.belief_params
    print(params)
    if 'pi' in params:
        print(w @ params['pi'])
    if 'mu' in params:
        print(w @ params['mu'])

import matplotlib.pyplot as plt
record = res['record']
# for key in record:
"""Unplugs any children that have terminated, and returns true if there are no running child components left (ie. their microproceses have finished) """ for child in self.childComponents(): if child._isStopped(): self.removeChild(child) # deregisters linkages for us return 0==len(self.childComponents()) __kamaelia_components__ = ( Seq, ) if __name__=="__main__": from Kamaelia.Chassis.Pipeline import Pipeline from OneShot import OneShot from Kamaelia.Util.Console import ConsoleEchoer Pipeline( Seq( "BEGIN SEQUENCE", OneShot("Hello\n"), OneShot("Doctor\n"), OneShot("Name\n"), OneShot("Continue\n"), OneShot("Yesterday\n"), OneShot("Tomorrow\n"), "END SEQUENCE", ), ConsoleEchoer(), ).run()
err_var[name] = np.average(np.average(res**2, axis=1)) / num_tests + err_var.get(name, 0)
print(name, 'diff', np.average(np.average(abs(res), axis=1)))
print(name, 'var',
      np.average(np.average(res**2, axis=1)) - np.average(np.average(abs(res), axis=1))**2)

name = 'OSI'
K = 5
T = 20
lr = 0.1
its = 200
fix_mix_its = int(its * 0.)
res = np.zeros((len(key_list), num_runs))
osi = OneShot(g=g, K=K, T=T, seed=seed)  # can be moved outside of all loops if the ground MRF doesn't change
for j in range(num_runs):
    start_time = time.process_time()
    osi.run(lr=lr, its=its, fix_mix_its=fix_mix_its)
    time_cost[name] = (time.process_time() - start_time) / num_runs / num_tests + time_cost.get(name, 0)
    print(name, f'time {time.process_time() - start_time}')
    for i, key in enumerate(key_list):
        res[i, j] = osi.map(rvs_table[key])
    print(res[:, j])
    # print(osi.params)
    print('Mu =\n', osi.params['Mu'], '\nVar =\n', osi.params['Var'])

for i, key in enumerate(key_list):
    res[i, :] -= ans[key]
# time_cost[name] = (time.process_time() - start_time) / num_runs / num_tests + time_cost.get(name, 0)
# print(name, f'time {time.process_time() - start_time}')
for i, rv in enumerate(rvs):
    mmap_res[algo, i] = bp.map(rv)

algo += 1
name = names[algo]
utils.set_log_potential_funs(g.factors_list)  # OSI assumes factors have callable .log_potential_fun
K = 2
T = 12
lr = 0.5
its = 200
fix_mix_its = int(its * 0.5)
osi = OneShot(g=g, K=K, T=T, seed=seed)  # can be moved outside of all loops if the ground MRF doesn't change
osi.run(lr=lr, its=its, fix_mix_its=fix_mix_its)
# print('osi params', osi.params)
for i, rv in enumerate(rvs):
    mmap_res[algo, i] = osi.map(obs_rvs=[], query_rv=rv)

# ground truth
# first, need to convert factor.log_potential_funs to one of (LogTable, LogQuadratic, LogHybridQuadratic);
# this can be done by first converting the corresponding potentials to one of (TablePotential, QuadraticPotential,
# HybridQuadraticPotential), then calling the .to_log_potential method on the potential objects
# manual conversion here:
factors[0].potential = HybridQuadraticPotential(
    A=-factors[0].potential.w * np.array([np.array([[1., 0], [0, 0]]), np.array([[0., 0.], [0., 1.]])]),
rel_g.init_nb()
rel_g.data = data
g, rvs_table = rel_g.grounded_graph()

from OneShot import OneShot
utils.set_log_potential_funs(g.factors_list)  # OSI assumes factors have callable .log_potential_fun

grad_check = False
if not grad_check:
    K = 3
    T = 20
    lr = 1e-2
    its = 500
    osi = OneShot(g=g, K=K, T=T, seed=seed)
    res = osi.run(lr=lr, its=its)
    record = res['record']
    del res['record']
    print(res)
    for key, rv in rvs_table.items():
        if rv.value is None:  # only test non-evidence nodes
            print(rv, key, osi.map(obs_rvs=[], query_rv=rv))

    import matplotlib.pyplot as plt
    for key in record:
        plt.plot(record[key], label=key)
    plt.legend(loc='best')
    save_name = __file__.split('.py')[0]
    plt.savefig('%s.png' % save_name)
def __init__(self, fileName, maxDuration, queue, prereqs=None):
    OneShot.__init__(self, fileName, maxDuration, prereqs)
    self.queue = queue
# marg_kls = np.zeros(len(query_rvs)) - 123
map_energy = -123
obj = -1
cpu_time = wall_time = -1  # don't care

if algo_name in ('OSI', 'LOSI', 'NPVI', 'LNPVI'):
    # K = 1
    T = 3
    lr = 0.5
    its = 1000
    fix_mix_its = int(its * 0.5)
    logging_itv = 100

    if algo_name in ('OSI', 'NPVI'):
        if algo_name == 'OSI':
            vi = OneShot(g=cond_g, K=K, T=T, seed=test_seed)
        else:
            vi = NPVI(g=cond_g, K=K, T=T, isotropic_cov=False, seed=test_seed)

        start_time = time.process_time()
        start_wall_time = time.time()
        res = vi.run(lr=lr, its=its, fix_mix_its=fix_mix_its, logging_itv=logging_itv)
        cpu_time = time.process_time() - start_time
        wall_time = time.time() - start_wall_time
print('true -log Z =', -np.log(Z))

g = Graph()
g.rvs = rvs
g.factors = factors
g.init_nb()
g.init_rv_indices()

from OneShot import OneShot

grad_check = False
if not grad_check:
    K = 4
    T = 30
    lr = 5e-1
    osi = OneShot(g=g, K=K, T=T, seed=seed)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)
    res = osi.run(lr=lr, optimizer=optimizer, its=1000)
    w = res['w']
    w_row = w[None, :]
    for rv in sorted(g.rvs):
        params = rv.belief_params
        print(params)
        if 'pi' in params:
            print(w @ params['pi'])
        if 'mu' in params:
            print(w @ params['mu'])

    import matplotlib.pyplot as plt
    record = res['record']