Code example #1
from typing import Optional

import numpy as np

# BayesNet and all_dicts are assumed to be provided by the accompanying project code.

def cross_entropy(bn1: BayesNet, bn2: BayesNet, nsamples: Optional[int] = None) -> float:
	"""Cross-entropy H(bn1, bn2) = -E_{x ~ bn1}[log bn2(x)]."""
	cross_ent = 0.0
	if nsamples is None:
		# Exact value: sum over every joint assignment of bn1's variables.
		bn1_vars = bn1.nodes.keys()
		for sample in all_dicts(bn1_vars):
			cross_ent -= np.exp(bn1.sample_log_prob(sample)) * bn2.sample_log_prob(sample)
	else:
		# Monte Carlo estimate: average -log bn2(x) over draws from bn1.
		for _ in range(nsamples):
			cross_ent -= bn2.sample_log_prob(bn1.sample())
		cross_ent /= nsamples
	return cross_ent
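
A quick way to sanity-check the two modes is to compare them on the same pair of networks: the Monte Carlo estimate should fluctuate around the exact value. A minimal sketch, assuming a BayesNet constructed from a model file as in Code example #2 (the file name here is hypothetical):

# Hypothetical sanity check: exact vs. sampled cross-entropy of a network with itself.
ref_bn = BayesNet(bn_file="model.json")               # "model.json" is a placeholder file name
exact_ce = cross_entropy(ref_bn, ref_bn)              # exact H(P, P), i.e. the entropy of ref_bn
mc_ce = cross_entropy(ref_bn, ref_bn, nsamples=1000)  # Monte Carlo estimate of the same quantity
print("exact: %.3f | sampled: %.3f" % (exact_ce, mc_ce))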
Code example #2
# get_args, read_samples, BayesNet, MLEBayesNet, and ParametricBayesNet are
# assumed to be provided by the accompanying project code.
def main():
	args = get_args()
	table_bn = BayesNet(bn_file=args.file_name)
	mle_bn = MLEBayesNet(bn_file=args.file_name)
	parametric_bn = ParametricBayesNet(bn_file=args.file_name)

	print("Initial params MLE bn:")
	print(mle_bn.pretty_print())

	print("Initial params parametric bn:")
	print(parametric_bn.pretty_print())

	print("========== Frequentist MLE ==========")
	samples = read_samples(args.samples_file_name)
	# Fit the CPDs of mle_bn from the observed samples (frequentist maximum likelihood).
	mle_bn.learn_cpds(samples)

	print("Reference BN")
	print(table_bn.pretty_print())

	print("MLE BayesNet after learning CPDs")
	print(mle_bn.pretty_print())

	print("========== Parametric MLE ==========")

	# ref_cent = cross_entropy(table_bn, table_bn)
	# cent = cross_entropy(table_bn, parametric_bn, nsamples=100)
	# print("Step %6d | CE: %6.3f / %6.3f" % (0, cent, ref_cent))

	# Online learning: repeatedly draw a sample from the reference BN and
	# update the parametric BN's parameters from it.
	for step in range(1, 1000):
		sample = table_bn.sample()
		parametric_bn.learn(sample, learning_rate=args.lr)

		if step % 500 == 0:
			print("step: ", step)
			# cent = cross_entropy(table_bn, parametric_bn, nsamples=200)
			# print("Step %6d | CE: %6.3f / %6.3f" % (step, cent, ref_cent))
			# print(f"Step {step:6d} | CE: {cent:6.3f} / {ref_cent:6.3f}")

	print("Reference BN")
	print(table_bn.pretty_print())

	print("Parametric BayesNet after learning CPDs")
	print(parametric_bn.pretty_print())
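
To run main() when the file is executed directly, the usual entry-point guard can be appended; this addition assumes the file is meant to be used as a standalone script:

# Standard entry point (assumed; not shown in the original listing).
if __name__ == "__main__":
	main()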