def test_empirical_distrib(self):
    """Checks sampling inference on a two-node discrete network, both
    with and without evidence on the parent variable."""
    prior = CategoricalTableBuilder("var1")
    prior.add_row("val1", 0.6)
    prior.add_row("val2", 0.4)

    conditional = ConditionalTableBuilder("var2")
    conditional.add_row(Assignment("var1", "val1"), "val1", 0.9)
    conditional.add_row(Assignment("var1", "val1"), "val2", 0.1)
    conditional.add_row(Assignment("var1", "val2"), "val1", 0.2)
    conditional.add_row(Assignment("var1", "val2"), "val2", 0.8)

    network = BNetwork()
    parent = ChanceNode("var1", prior.build())
    network.add_node(parent)
    child = ChanceNode("var2", conditional.build())
    child.add_input_node(parent)
    network.add_node(child)

    sampling = SamplingAlgorithm(2000, 500)

    # with evidence var1=val1 the posterior matches the conditional row
    distrib = sampling.query_prob(network, "var2", Assignment("var1", "val1"))
    assert distrib.get_prob("val1") == pytest.approx(0.9, abs=0.05)
    assert distrib.get_prob("val2") == pytest.approx(0.1, abs=0.05)

    # without evidence, the marginal is 0.6*0.9 + 0.4*0.2 = 0.62
    distrib2 = sampling.query_prob(network, "var2")
    assert distrib2.get_prob("val1") == pytest.approx(0.62, abs=0.05)
    assert distrib2.get_prob("val2") == pytest.approx(0.38, abs=0.05)
def test_dep_empirical_distrib_continuous(self):
    """Checks cumulative probabilities for a continuous variable whose
    density depends on a discrete parent."""
    network = BNetwork()

    builder = CategoricalTableBuilder("var1")
    builder.add_row(ValueFactory.create("one"), 0.7)
    builder.add_row(ValueFactory.create("two"), 0.3)
    parent = ChanceNode("var1", builder.build())
    network.add_node(parent)

    # var2 is uniform on [-1, 3] when var1=one, Gaussian(3, 10) when var1=two
    uniform = ContinuousDistribution("var2", UniformDensityFunction(-1.0, 3.0))
    gaussian = ContinuousDistribution("var2", GaussianDensityFunction(3.0, 10.0))
    table = ConditionalTable("var2")
    table.add_distrib(Assignment("var1", "one"), uniform)
    table.add_distrib(Assignment("var1", "two"), gaussian)
    child = ChanceNode("var2", table)
    child.add_input_node(parent)
    network.add_node(child)

    inference = InferenceChecks()
    inference.check_cdf(network, "var2", -1.5, 0.021)
    inference.check_cdf(network, "var2", 0., 0.22)
    inference.check_cdf(network, "var2", 2., 0.632)
    inference.check_cdf(network, "var2", 8., 0.98)
def add_to_state(self, distrib):
    """
    Adds a new node to the dialogue state with the distribution provided
    as argument.

    :param distrib: the distribution to include
    """
    # updated variables get a primed identifier
    variable = distrib.get_variable() + "'"
    self.set_as_committed(variable)
    distrib.modify_variable_id(distrib.get_variable(), variable)
    chance_node = ChanceNode(variable, distrib)

    # replace any existing node (and its descendants) for this variable
    if self.has_node(variable):
        existing = self.get_node(variable)
        self.remove_nodes(existing.get_descendant_ids())
        self.remove_node(existing.get_id())

    # wire up the input dependencies that are present in the state
    for input_var in distrib.get_input_variables():
        if self.has_chance_node(input_var):
            chance_node.add_input_node(self.get_chance_node(input_var))

    self.add_node(chance_node)
    self._connect_to_predictions(chance_node)
    if variable in self._incremental_vars:
        self._incremental_vars.remove(variable)
def create_chance_node(node):
    """
    Creates a new chance node corresponding to the XML specification.

    :param node: the XML node
    :return: the resulting chance node encoded
    :raises ValueError: if the node has no valid 'id' attribute, if a
        'distrib' child is malformed, or if the categorical probabilities
        sum to more than one
    """
    if len(node.attrib) == 0:
        raise ValueError("chance node has no attributes")

    try:
        label = node.attrib['id'].strip()
    except KeyError:
        # narrowed from a bare 'except:'; only KeyError can arise here
        raise ValueError("chance node has no 'id' attribute") from None

    if len(label) == 0:
        raise ValueError("chance node has an empty 'id' attribute")

    builder = CategoricalTableBuilder(label)
    distrib = None
    for child_node in node:
        if child_node.tag == 'value':
            # first case: the chance node is described as a categorical table;
            # extract the value and its probability
            prob = XMLStateReader._get_probability(child_node)
            value = ValueFactory.create(child_node.text.strip())
            builder.add_row(value, prob)
        elif child_node.tag == 'distrib':
            # second case: the chance node is described by a parametric
            # continuous distribution
            try:
                distrib_type = child_node.attrib['type'].lower()
                if distrib_type == 'gaussian':
                    distrib = ContinuousDistribution(
                        label, XMLStateReader._get_gaussian(child_node))
                elif distrib_type == 'uniform':
                    distrib = ContinuousDistribution(
                        label, XMLStateReader._get_uniform(child_node))
                elif distrib_type == 'dirichlet':
                    distrib = ContinuousDistribution(
                        label, XMLStateReader._get_dirichlet(child_node))
                # NOTE(review): an unrecognized 'type' silently falls through
                # to the categorical branch below — confirm this is intended
            except Exception as e:
                # narrowed from a bare 'except:' so that KeyboardInterrupt
                # and SystemExit are no longer swallowed
                raise ValueError(
                    "malformed 'distrib' element for node '%s'" % label) from e

    if distrib is not None:
        return ChanceNode(label, distrib)

    total_prob = builder.get_total_prob()
    # tolerate tiny floating-point overshoot of the total probability mass
    # TODO: check eps
    eps = 1e-8
    if total_prob > 1.0 + eps:
        raise ValueError("total probability %f exceeds 1 for node '%s'"
                         % (total_prob, label))
    return ChanceNode(label, builder.build())
def construct_basic_network4():
    """Returns the basic network extended with one continuous node
    (labelled "gaussian", although its density is uniform on [-2, 3])."""
    network = NetworkExamples.construct_basic_network()
    density = UniformDensityFunction(-2, 3)
    extra = ChanceNode("gaussian", ContinuousDistribution("gaussian", density))
    network.add_node(extra)
    return network
def test_table_expansion(self):
    """Checks that adding a new parent to an existing node expands its
    conditional table while keeping the original probabilities."""
    network = NetworkExamples.construct_basic_network()

    builder = CategoricalTableBuilder("HouseSize")
    builder.add_row(ValueFactory.create("Small"), 0.7)
    builder.add_row(ValueFactory.create("Big"), 0.2)
    builder.add_row(ValueFactory.create("None"), 0.1)
    house_size = ChanceNode("HouseSize", builder.build())
    network.add_node(house_size)

    # Burglary gains HouseSize as parent; its probabilities are unchanged
    network.get_node("Burglary").add_input_node(house_size)
    burglary = network.get_chance_node("Burglary")
    assert burglary.get_prob(
        Assignment(["HouseSize", "Small"]),
        ValueFactory.create(True)) == pytest.approx(0.001, abs=0.0001)
    assert burglary.get_prob(
        Assignment(["HouseSize", "Big"]),
        ValueFactory.create(True)) == pytest.approx(0.001, abs=0.0001)

    # same for Alarm, queried with and without the new parent assigned
    network.get_node("Alarm").add_input_node(house_size)
    alarm = network.get_chance_node("Alarm")
    assert alarm.get_prob(
        Assignment(["Burglary", "Earthquake"]),
        ValueFactory.create(True)) == pytest.approx(0.95, abs=0.0001)
    assert alarm.get_prob(
        Assignment(Assignment(["Burglary", "Earthquake"]), "HouseSize",
                   ValueFactory.create("None")),
        ValueFactory.create(True)) == pytest.approx(0.95, abs=0.0001)
def test_empirical_distrib_continuous(self):
    """Compares the sampled posterior of a uniform continuous variable
    against the true density and cumulative distribution."""
    continuous = ContinuousDistribution("var1",
                                        UniformDensityFunction(-1.0, 3.0))
    network = BNetwork()
    network.add_node(ChanceNode("var1", continuous))

    sampling = SamplingAlgorithm(2000, 200)
    distrib2 = sampling.query_prob(network, "var1")

    # the discretised posterior should use roughly the configured buckets
    assert len(distrib2.get_posterior(Assignment()).get_values()) == \
        pytest.approx(Settings.discretization_buckets, abs=2)

    # cumulative probabilities below, inside and above the support [-1, 3]
    assert distrib2.to_continuous().get_cumulative_prob(-1.1) == \
        pytest.approx(0, abs=0.001)
    assert distrib2.to_continuous().get_cumulative_prob(1.0) == \
        pytest.approx(0.5, abs=0.06)
    assert distrib2.to_continuous().get_cumulative_prob(3.1) == \
        pytest.approx(1.0, abs=0.00)

    # sampled densities should match the true uniform density
    for point in (-2.0, -0.5, 1.8, 3.2):
        assert continuous.get_prob_density(point) == pytest.approx(
            distrib2.to_continuous().get_prob_density(point), abs=0.1)
def reduce(self, query):
    """
    Reduces the Bayesian network to a subset of its variables. This
    reduction operates here by generating the possible conditional
    assignments for every retained variable, and calculating the
    distribution for each assignment.

    :param query: the reduction query
    :return: the reduced network
    """
    network = query.get_network()
    query_vars = set(query.get_query_vars())
    evidence = query.get_evidence()

    # retain only the queried variables, in reverse topological order
    sorted_node_ids = [node_id for node_id in network.get_sorted_node_ids()
                       if node_id in query_vars]
    sorted_node_ids.reverse()

    reduced_network = BNetwork()
    for variable_id in sorted_node_ids:
        direct_ancestors = network.get_node(variable_id).get_ancestor_ids(
            query_vars)

        # enumerate every possible assignment of the retained ancestors.
        # BUG FIX: the values must come from each ancestor node, not from
        # the variable itself (previously get_node(variable_id).get_values())
        input_values = dict()
        for direct_ancestor in direct_ancestors:
            input_values[direct_ancestor] = network.get_node(
                direct_ancestor).get_values()
        assignments = InferenceUtils.get_all_combinations(input_values)

        # query the posterior of the variable for each ancestor assignment
        builder = ConditionalTableBuilder(variable_id)
        for assignment in assignments:
            new_evidence = Assignment([evidence, assignment])
            result = self.query_prob(network, variable_id, new_evidence)
            builder.add_rows(assignment, result.get_table())

        chance_node = ChanceNode(variable_id, builder.build())
        for ancestor in direct_ancestors:
            chance_node.add_input_node(reduced_network.get_node(ancestor))
        reduced_network.add_node(chance_node)

    return reduced_network
def reduce(state, nodes_to_keep):
    """
    Reduces a Bayesian network to a subset of variables. The method is
    divided in three steps:

    - The method first checks whether inference is necessary at all or
      whether the current network can be returned as it is.
    - If inference is necessary, the algorithm divides the network into
      cliques and performs inference on each clique separately.
    - Finally, if only one clique is present, the reduction selects the
      best algorithm and return the result of the reduction process.

    :param state: the dialogue state to reduce
    :param nodes_to_keep: the nodes to preserve in the reduction
    :return: the reduced dialogue state
    """
    evidence = state.get_evidence()

    if evidence.contains_vars(nodes_to_keep):
        # all nodes to keep are fixed by the evidence: build a state of
        # plain value nodes, no inference needed
        evidence_state = DialogueState()
        for variable in nodes_to_keep:
            evidence_state.add_node(
                ChanceNode(variable, evidence.get_value(variable)))
        return evidence_state

    if state.get_node_ids().issubset(nodes_to_keep):
        # the current network already contains only retained nodes
        return state

    if (state.is_clique(nodes_to_keep)
            and not evidence.contains_one_var(nodes_to_keep)):
        # all nodes belong to a single clique untouched by the evidence:
        # simply extract the subset of nodes
        return DialogueState(state.get_nodes(nodes_to_keep), evidence)

    if state.contains_distrib(nodes_to_keep, AnchoredRule):
        # some rule nodes are included: use the lightweight reduction
        return StatePruner.reduce_light(state, nodes_to_keep)

    # if the network can be divided into cliques, reduce each clique
    # separately and merge the partial results
    cliques = state.get_cliques(nodes_to_keep)
    if len(cliques) > 1:
        merged_state = DialogueState()
        for clique in cliques:
            clique.intersection_update(nodes_to_keep)
            clique_state = StatePruner.reduce(state, clique)
            merged_state.add_network(clique_state)
            merged_state.add_evidence(clique_state.get_evidence())
        return merged_state

    # single clique requiring inference: delegate to the switching algorithm
    result = SwitchingAlgorithm().reduce(state, nodes_to_keep, evidence)
    return DialogueState(result)
def reduce(self, query):
    """
    Reduces the Bayesian network to a subset of its variables and
    returns the result.

    NB: the equivalent "reduce" method includes additional speed-up
    methods to simplify the reduction process.

    :param query: the reduction query
    :return: the reduced Bayesian network
    """
    network = query.get_network()
    query_vars = query.get_query_vars()

    # draw the samples once and estimate every marginal from them
    lw_query = LikelihoodWeighting(query, self._nr_samples,
                                   self._max_sampling_time)
    full_distrib = EmpiricalDistribution(lw_query.get_samples())

    reduced_network = BNetwork()
    for variable in query.get_sorted_query_vars():
        input_node_ids = network.get_node(variable).get_ancestor_ids(
            query_vars)

        # drop ancestors whose reduced distribution is continuous
        # (iterate a copy, since the set is mutated in the loop)
        for candidate_id in list(input_node_ids):
            candidate = reduced_network.get_chance_node(candidate_id)
            if isinstance(candidate.get_distrib(), ContinuousDistribution):
                input_node_ids.remove(candidate_id)

        marginal = full_distrib.get_marginal(variable, input_node_ids)
        node = ChanceNode(variable, marginal)
        for input_node_id in input_node_ids:
            node.add_input_node(reduced_network.get_node(input_node_id))
        reduced_network.add_node(node)

    return reduced_network
def reduce(self, query):
    """
    Reduces the Bayesian network by retaining only a subset of variables
    and marginalising out the rest.

    :param query: the query containing the network to reduce, the
        variables to retain, and possible evidence.
    :return: the reduced network holding the probability distributions
        for the retained variables
    """
    network = query.get_network()
    query_vars = query.get_query_vars()
    query_factor = self._create_query_factor(query)

    # keep only the retained variables, in reverse topological order
    retained_ids = [node_id for node_id in network.get_sorted_node_ids()
                    if node_id in query_vars]
    retained_ids.reverse()

    reduced_network = BNetwork()
    for variable in retained_ids:
        direct_ancestors = network.get_node(variable).get_ancestor_ids(
            query_vars)
        # build the distribution of the variable given its retained ancestors
        factor = self._get_relevant_factor(query_factor, variable,
                                           direct_ancestors)
        distrib = self._create_prob_distribution(variable, factor)
        chance_node = ChanceNode(variable, distrib)
        for ancestor in direct_ancestors:
            chance_node.add_input_node(reduced_network.get_node(ancestor))
        reduced_network.add_node(chance_node)

    return reduced_network
def _connect_to_predictions(self, output_node):
    """
    Connects the chance node to its prior predictions (if any).

    :param output_node: the output node to connect
    """
    output_var = output_node.get_id()
    base_var = output_var[:-1]  # variable id without its final character
    prediction_var = base_var + "^p"

    # only link genuine output variables to an existing prediction node
    if "^p" in output_var or not self.has_chance_node(prediction_var):
        return

    # the equivalence node ties the output to its prediction, and the
    # evidence asserts that the equivalence holds
    equality_node = ChanceNode("=_" + base_var,
                               EquivalenceDistribution(base_var))
    equality_node.add_input_node(output_node)
    equality_node.add_input_node(self.get_node(prediction_var))
    self.add_evidence(Assignment(equality_node.get_id(), True))
    self.add_node(equality_node)
def _add_probability_rule(self, rule):
    """
    Adds the probability rule to the dialogue state.

    :param rule: the anchored rule (must be of type PROB)
    :raises ValueError: if an input or parameter variable of the rule is
        not present in the state
    """
    rule_id = rule.get_variable()
    if self.has_chance_node(rule_id):
        self.remove_node(rule_id)

    rule_node = ChanceNode(rule_id, rule)
    rule_node.get_values()  # result unused — presumably precomputes values

    # attach every input and parameter variable of the rule
    for var in rule.get_input_variables().union(rule.get_parameters()):
        if not self.has_chance_node(var):
            raise ValueError('undefined node type of %s' % var)
        rule_node.add_input_node(self.get_chance_node(var))
    self.add_node(rule_node)

    # create or update the output nodes affected by the rule
    for updated_var in rule.get_outputs():
        if self.has_node(updated_var):
            output_node = self.get_chance_node(updated_var)
            output_distrib = output_node.get_distrib()
        else:
            output_distrib = OutputDistribution(updated_var)
            output_node = ChanceNode(updated_var, output_distrib)
            self.add_node(output_node)
            self._connect_to_predictions(output_node)
        output_node.add_input_node(rule_node)
        output_distrib.add_anchored_rule(rule)
def test_switching(self):
    """Checks that the switching algorithm picks exact inference on small
    discrete networks and switches to sampling when the branching factor
    is exceeded or continuous nodes are present."""
    old_factor = SwitchingAlgorithm.max_branching_factor
    SwitchingAlgorithm.max_branching_factor = 4
    network = NetworkExamples.construct_basic_network2()

    # small discrete network: expect an exact (tabular) result
    distrib = SwitchingAlgorithm().query_prob(
        network, ["Burglary"], Assignment(["JohnCalls", "MaryCalls"]))
    assert isinstance(distrib, MultivariateTable)

    # add three extra parents to "Alarm" to exceed the branching factor
    builder = CategoricalTableBuilder("n1")
    builder.add_row(ValueFactory.create("aha"), 1.0)
    n1 = ChanceNode("n1", builder.build())
    network.add_node(n1)

    builder = CategoricalTableBuilder("n2")
    builder.add_row(ValueFactory.create("oho"), 0.7)
    n2 = ChanceNode("n2", builder.build())
    network.add_node(n2)

    builder = CategoricalTableBuilder("n3")
    builder.add_row(ValueFactory.create("ihi"), 0.7)
    n3 = ChanceNode("n3", builder.build())
    network.add_node(n3)

    alarm = network.get_node("Alarm")
    alarm.add_input_node(n1)
    alarm.add_input_node(n2)
    alarm.add_input_node(n3)

    distrib = SwitchingAlgorithm().query_prob(
        network, ["Burglary"], Assignment(["JohnCalls", "MaryCalls"]))
    assert distrib.__class__ == EmpiricalDistribution

    # removing two of the parents brings back exact inference
    network.remove_node(n1.get_id())
    network.remove_node(n2.get_id())
    distrib = SwitchingAlgorithm().query_prob(
        network, ["Burglary"], Assignment(["JohnCalls", "MaryCalls"]))
    assert isinstance(distrib, MultivariateTable)

    # continuous parents force the sampling algorithm again
    n1 = ChanceNode(
        "n1", ContinuousDistribution("n1", UniformDensityFunction(-2.0, 2.0)))
    n2 = ChanceNode(
        "n2", ContinuousDistribution("n2", GaussianDensityFunction(-1.0, 3.0)))
    network.add_node(n1)
    network.add_node(n2)
    earthquake = network.get_node("Earthquake")
    earthquake.add_input_node(n1)
    earthquake.add_input_node(n2)

    distrib = SwitchingAlgorithm().query_prob(
        network, ["Burglary"], Assignment(["JohnCalls", "MaryCalls"]))
    assert isinstance(distrib, EmpiricalDistribution)

    SwitchingAlgorithm.max_branching_factor = old_factor
def construct_basic_network():
    """Builds the classic burglary/earthquake/alarm network, extended
    with an action node and two utility nodes."""
    network = BNetwork()

    # prior on Burglary
    builder = CategoricalTableBuilder('Burglary')
    builder.add_row(ValueFactory.create(True), 0.001)
    builder.add_row(ValueFactory.create(False), 0.999)
    burglary = ChanceNode("Burglary", builder.build())
    network.add_node(burglary)

    # prior on Earthquake
    builder = CategoricalTableBuilder('Earthquake')
    builder.add_row(ValueFactory.create(True), 0.002)
    builder.add_row(ValueFactory.create(False), 0.998)
    earthquake = ChanceNode("Earthquake", builder.build())
    network.add_node(earthquake)

    # Alarm conditioned on Burglary and Earthquake
    builder = ConditionalTableBuilder('Alarm')
    builder.add_row(Assignment(["Burglary", "Earthquake"]),
                    ValueFactory.create(True), 0.95)
    builder.add_row(Assignment(["Burglary", "Earthquake"]),
                    ValueFactory.create(False), 0.05)
    builder.add_row(Assignment(["Burglary", "!Earthquake"]),
                    ValueFactory.create(True), 0.95)
    builder.add_row(Assignment(["Burglary", "!Earthquake"]),
                    ValueFactory.create(False), 0.05)
    builder.add_row(Assignment(["!Burglary", "Earthquake"]),
                    ValueFactory.create(True), 0.29)
    builder.add_row(Assignment(["!Burglary", "Earthquake"]),
                    ValueFactory.create(False), 0.71)
    builder.add_row(Assignment(["!Burglary", "!Earthquake"]),
                    ValueFactory.create(True), 0.001)
    builder.add_row(Assignment(["!Burglary", "!Earthquake"]),
                    ValueFactory.create(False), 0.999)
    alarm = ChanceNode("Alarm", builder.build())
    alarm.add_input_node(burglary)
    alarm.add_input_node(earthquake)
    network.add_node(alarm)

    # MaryCalls conditioned on Alarm
    builder = ConditionalTableBuilder("MaryCalls")
    builder.add_row(Assignment("Alarm"), ValueFactory.create(True), 0.7)
    builder.add_row(Assignment("Alarm"), ValueFactory.create(False), 0.3)
    builder.add_row(Assignment("!Alarm"), ValueFactory.create(True), 0.01)
    builder.add_row(Assignment("!Alarm"), ValueFactory.create(False), 0.99)
    mary_calls = ChanceNode("MaryCalls", builder.build())
    mary_calls.add_input_node(alarm)
    network.add_node(mary_calls)

    # JohnCalls conditioned on Alarm
    builder = ConditionalTableBuilder("JohnCalls")
    builder.add_row(Assignment(["Alarm"]), ValueFactory.create(True), 0.9)
    builder.add_row(Assignment(["Alarm"]), ValueFactory.create(False), 0.1)
    builder.add_row(Assignment(["!Alarm"]), ValueFactory.create(True), 0.05)
    builder.add_row(Assignment(["!Alarm"]), ValueFactory.create(False), 0.95)
    john_calls = ChanceNode("JohnCalls", builder.build())
    john_calls.add_input_node(alarm)
    network.add_node(john_calls)

    # available system actions
    action = ActionNode("Action")
    action.add_value(ValueFactory.create("CallPolice"))
    action.add_value(ValueFactory.create("DoNothing"))
    network.add_node(action)

    # first utility node: cost of calling the police
    util1 = UtilityNode("Util1")
    util1.add_input_node(burglary)
    util1.add_input_node(action)
    util1.add_utility(
        Assignment(Assignment("Burglary", True), "Action",
                   ValueFactory.create("CallPolice")), -0.5)
    util1.add_utility(
        Assignment(Assignment("Burglary", False), "Action",
                   ValueFactory.create("CallPolice")), -1.0)
    util1.add_utility(
        Assignment(Assignment("Burglary", True), "Action",
                   ValueFactory.create("DoNothing")), 0.0)
    util1.add_utility(
        Assignment(Assignment("Burglary", False), "Action",
                   ValueFactory.create("DoNothing")), 0.0)
    network.add_node(util1)

    # second utility node: cost of doing nothing during a burglary
    util2 = UtilityNode("Util2")
    util2.add_input_node(burglary)
    util2.add_input_node(action)
    util2.add_utility(
        Assignment(Assignment("Burglary", True), "Action",
                   ValueFactory.create("CallPolice")), 0.0)
    util2.add_utility(
        Assignment(Assignment("Burglary", False), "Action",
                   ValueFactory.create("CallPolice")), 0.0)
    util2.add_utility(
        Assignment(Assignment("Burglary", True), "Action",
                   ValueFactory.create("DoNothing")), -10.0)
    util2.add_utility(
        Assignment(Assignment("Burglary", False), "Action",
                   ValueFactory.create("DoNothing")), 0.5)
    network.add_node(util2)

    return network
def test_dirichlet(self):
    """Tests sampling, discretisation and moment estimation for a
    two-dimensional Dirichlet distribution with alphas (40, 80)."""
    old_discretisation_settings = Settings.discretization_buckets
    Settings.discretization_buckets = 250

    alphas = np.array([40.0, 80.0])
    dirichlet = DirichletDensityFunction(alphas)
    distrib = ContinuousDistribution("x", dirichlet)
    assert isinstance(distrib.sample(), ArrayVal)
    assert 2 == len(distrib.sample())
    assert distrib.sample().get_array()[0] == pytest.approx(0.33, abs=0.15)

    ##############################################
    # NOTE: the original Java code for the dirichlet distribution has a bug.
    ##############################################
    # assert distrib.get_prob_density(ArrayVal([1./3, 2./3])) == pytest.approx(8.0, abs=0.5)

    n = ChanceNode("x", distrib)
    network = BNetwork()
    network.add_node(n)

    table = VariableElimination().query_prob(network, "x")
    # renamed from 'sum', which shadowed the builtin
    mass_below_third = 0.
    for value in table.get_values():
        if value.get_array()[0] < 0.33333:
            mass_below_third += table.get_prob(value)
    assert mass_below_third == pytest.approx(0.5, abs=0.1)

    conversion1 = VariableElimination().query_prob(network, "x")
    assert abs(
        len(conversion1.get_posterior(Assignment()).get_values()) -
        Settings.discretization_buckets) < 10
    assert conversion1.get_posterior(Assignment()).get_prob(
        ValueFactory.create("[0.3333,0.6666]")) == pytest.approx(0.02,
                                                                 abs=0.05)

    conversion3 = SamplingAlgorithm(4000, 1000).query_prob(network, "x")
    # DistributionViewer(conversion3)
    # Thread.sleep(3000000)
    # TODO: check whether the test case below is problematic.
    # assert conversion3.to_continuous().get_prob_density(ValueFactory.create("[0.3333,0.6666]")) == pytest.approx(9.0, abs=1.5)

    # analytic moments: mean 1/3, variance ~0.0018 for Dirichlet(40, 80)
    assert distrib.get_function().get_mean()[0] == pytest.approx(0.333333,
                                                                 abs=0.01)
    assert distrib.get_function().get_variance()[0] == pytest.approx(
        0.002, abs=0.01)
    assert conversion3.to_continuous().get_function().get_mean()[0] == \
        pytest.approx(0.333333, abs=0.05)
    assert conversion3.to_continuous().get_function().get_variance()[0] == \
        pytest.approx(0.002, abs=0.05)

    Settings.discretization_buckets = old_discretisation_settings
def construct_iwsds_network():
    """Builds the example dialogue network with user intention i_u,
    user act a_u, observation o, machine action a_m and reward r."""
    network = BNetwork()

    # prior over the user intention
    builder = CategoricalTableBuilder("i_u")
    builder.add_row(ValueFactory.create("ki"), 0.4)
    builder.add_row(ValueFactory.create("of"), 0.3)
    builder.add_row(ValueFactory.create("co"), 0.3)
    i_u = ChanceNode("i_u", builder.build())
    network.add_node(i_u)

    # user act given the intention
    builder = ConditionalTableBuilder("a_u")
    builder.add_row(Assignment("i_u", "ki"), ValueFactory.create("ki"), 0.9)
    builder.add_row(Assignment("i_u", "ki"), ValueFactory.create("null"), 0.1)
    builder.add_row(Assignment("i_u", "of"), ValueFactory.create("of"), 0.9)
    builder.add_row(Assignment("i_u", "of"), ValueFactory.create("null"), 0.1)
    builder.add_row(Assignment("i_u", "co"), ValueFactory.create("co"), 0.9)
    builder.add_row(Assignment("i_u", "co"), ValueFactory.create("null"), 0.1)
    a_u = ChanceNode("a_u", builder.build())
    a_u.add_input_node(i_u)
    network.add_node(a_u)

    # observation given the user act.
    # BUG FIX: the builder was labelled "a_u" although it describes the
    # distribution of the observation node "o"
    builder = ConditionalTableBuilder("o")
    builder.add_row(Assignment("a_u", "ki"), ValueFactory.create("True"), 0.0)
    builder.add_row(Assignment("a_u", "ki"), ValueFactory.create("False"), 1.0)
    builder.add_row(Assignment("a_u", "of"), ValueFactory.create("True"), 0.6)
    builder.add_row(Assignment("a_u", "of"), ValueFactory.create("False"), 0.4)
    builder.add_row(Assignment("a_u", "co"), ValueFactory.create("True"), 0.15)
    builder.add_row(Assignment("a_u", "co"), ValueFactory.create("False"),
                    0.85)
    builder.add_row(Assignment("a_u", "null"), ValueFactory.create("True"),
                    0.25)
    builder.add_row(Assignment("a_u", "null"), ValueFactory.create("False"),
                    0.75)
    o = ChanceNode("o", builder.build())
    o.add_input_node(a_u)
    network.add_node(o)

    # machine actions
    a_m = ActionNode("a_m")
    a_m.add_value(ValueFactory.create("ki"))
    a_m.add_value(ValueFactory.create("of"))
    a_m.add_value(ValueFactory.create("co"))
    a_m.add_value(ValueFactory.create("rep"))
    network.add_node(a_m)

    # reward: +3 for the matching action, -5 for a mismatch, -0.5 for "rep"
    r = UtilityNode("r")
    r.add_input_node(a_m)
    r.add_input_node(i_u)
    r.add_utility(
        Assignment(Assignment("a_m", "ki"), Assignment("i_u", "ki")), 3)
    r.add_utility(
        Assignment(Assignment("a_m", "ki"), Assignment("i_u", "of")), -5)
    r.add_utility(
        Assignment(Assignment("a_m", "ki"), Assignment("i_u", "co")), -5)
    r.add_utility(
        Assignment(Assignment("a_m", "of"), Assignment("i_u", "ki")), -5)
    r.add_utility(
        Assignment(Assignment("a_m", "of"), Assignment("i_u", "of")), 3)
    r.add_utility(
        Assignment(Assignment("a_m", "of"), Assignment("i_u", "co")), -5)
    r.add_utility(
        Assignment(Assignment("a_m", "co"), Assignment("i_u", "ki")), -5)
    r.add_utility(
        Assignment(Assignment("a_m", "co"), Assignment("i_u", "of")), -5)
    r.add_utility(
        Assignment(Assignment("a_m", "co"), Assignment("i_u", "co")), 3)
    r.add_utility(
        Assignment(Assignment("a_m", "rep"), Assignment("i_u", "ki")), -0.5)
    r.add_utility(
        Assignment(Assignment("a_m", "rep"), Assignment("i_u", "of")), -0.5)
    r.add_utility(
        Assignment(Assignment("a_m", "rep"), Assignment("i_u", "co")), -0.5)
    network.add_node(r)

    return network