def plurality_value(examples):
    """Return a DecisionLeaf holding the most common target value in
    *examples* (majority for a binary target, plurality otherwise).
    Ties are broken at random by argmax_random_tie.
    """
    # NOTE(review): depends on enclosing-scope `values`, `target`,
    # `count`, and `DecisionLeaf` — confirm this is a nested helper.
    best = argmax_random_tie(values[target],
                             key=lambda v: count(target, v, examples))
    return DecisionLeaf(best)
def hill_climbing(self, problem, map_canvas):
    """Animated hill climbing for the TSP canvas; the number of sampled
    neighbors per step comes from the user-controlled widget.

    Loops indefinitely, redrawing the current tour (thick outline) and
    the best sampled neighbor (thin outline) each iteration.
    """
    def find_neighbors(state, number_of_neighbors=100):
        """Chain two-opt moves to produce candidate successor tours."""
        candidates = []
        for _ in range(number_of_neighbors):
            state = problem.two_opt(state)
            candidates.append(Node(state))
        return candidates

    current = Node(problem.initial)
    while True:
        candidates = find_neighbors(current.state, self.no_of_neighbors.get())
        neighbor = utils.argmax_random_tie(
            candidates, key=lambda node: problem.value(node.state))
        map_canvas.delete('poly')
        # Flatten each city's (x, y) screen coordinates into one list.
        points = [self.frame_locations[city][axis]
                  for city in current.state for axis in (0, 1)]
        map_canvas.create_polygon(points, outline='red', width=3,
                                  fill='', tag='poly')
        neighbor_points = [self.frame_locations[city][axis]
                           for city in neighbor.state for axis in (0, 1)]
        map_canvas.create_polygon(neighbor_points, outline='red', width=1,
                                  fill='', tag='poly')
        map_canvas.update()
        map_canvas.after(self.speed.get())
        if problem.value(neighbor.state) > problem.value(current.state):
            current.state = neighbor.state
            # value() is negated cost, so flip the sign for display.
            self.cost.set("Cost = " + str('%0.3f' % (-1 * problem.value(current.state))))
def hill_climbing(problem):
    """Steepest-ascent local search: follow the best-valued neighbor
    until no neighbor improves on the current node, then return its state."""
    node = Node(problem.initial)
    while True:
        successors = node.expand(problem)
        if not successors:
            break
        best = argmax_random_tie(successors,
                                 key=lambda n: problem.value(n.state))
        if problem.value(best.state) <= problem.value(node.state):
            break
        node = best
    return node.state
def hill_climbing(self, problem, map_canvas):
    """Animated hill climbing where the number of neighbors sampled per
    step is taken from the user-controlled ``no_of_neighbors`` widget.

    Runs until the surrounding GUI is closed; each iteration redraws the
    current tour (thick outline) and the best sampled neighbor (thin
    outline) on *map_canvas*.

    Fixes: removed leftover debug ``print("kd")`` inside the loop, and
    removed the timing code that sat after the non-terminating loop and
    was therefore unreachable dead code.
    """
    def find_neighbors(state, number_of_neighbors=100):
        """Generate successor tours by chained two-opt moves."""
        neighbors = []
        for _ in range(number_of_neighbors):
            state = problem.two_opt(state)
            neighbors.append(Node(state))
        return neighbors

    current = Node(problem.initial)
    while True:
        neighbors = find_neighbors(current.state, self.no_of_neighbors.get())
        neighbor = utils.argmax_random_tie(
            neighbors, key=lambda node: problem.value(node.state))
        map_canvas.delete('poly')
        # Current tour: thick outline.
        points = []
        for city in current.state:
            points.append(self.frame_locations[city][0])
            points.append(self.frame_locations[city][1])
        map_canvas.create_polygon(points, outline='red', width=3,
                                  fill='', tag='poly')
        # Best sampled neighbor: thin outline.
        neighbor_points = []
        for city in neighbor.state:
            neighbor_points.append(self.frame_locations[city][0])
            neighbor_points.append(self.frame_locations[city][1])
        map_canvas.create_polygon(neighbor_points, outline='red', width=1,
                                  fill='', tag='poly')
        map_canvas.update()
        map_canvas.after(self.speed.get())
        if problem.value(neighbor.state) > problem.value(current.state):
            current.state = neighbor.state
            # problem.value is negated cost, so flip the sign for display.
            self.cost.set("Cost = " + str('%0.3f' % (-1 * problem.value(current.state))))
def hill_climbing(problem):
    """Climb greedily from the initial node: keep moving to the best
    neighbor while it strictly improves, then return the reached state."""
    current = Node(problem.initial)
    improved = True
    while improved:
        improved = False
        children = current.expand(problem)
        if children:
            best = argmax_random_tie(
                children, key=lambda child: problem.value(child.state))
            if problem.value(best.state) > problem.value(current.state):
                current = best
                improved = True
    return current.state
def hill_climbing(problem):
    """From the initial node, keep choosing the neighbor with highest
    value, stopping when no neighbor is better. [Fig. 4.2]

    Returns the state (not the Node) of the local maximum reached.
    """
    current = Node(problem.initial)
    while True:
        neighbors = current.expand(problem)
        if not neighbors:
            break
        # Consistency fix: pass the scoring function as `key=`, matching
        # the other hill_climbing variants in this file.
        neighbor = argmax_random_tie(
            neighbors, key=lambda node: problem.value(node.state))
        if problem.value(neighbor.state) <= problem.value(current.state):
            break
        current = neighbor
    return current.state
def hill_climbing(problem):
    """Greedy ascent from the initial state: repeatedly move to the
    highest-valued neighbor, printing a progress dot for each uphill
    step, and stop at a local maximum. [Figure 4.2]"""
    node = Node(problem.initial)
    while True:
        children = node.expand(problem)
        if not children:
            break
        best = argmax_random_tie(
            children, key=lambda n: problem.value(n.state))
        if problem.value(best.state) <= problem.value(node.state):
            break
        node = best
        print('.', end='', flush=True)  # one tick per accepted move
    return node.state
def alphabeta_search(state, game, d=4, cutoff_test=None, eval_fn=None):
    """Search game to determine best action; use alpha-beta pruning.
    This version cuts off search and uses an evaluation function.

    Args:
        state: current game state.
        game: object providing to_move/successors/terminal_test/utility.
        d: maximum depth for the default cutoff test.
        cutoff_test: optional (state, depth) -> bool predicate.
        eval_fn: optional state -> number heuristic; defaults to the
            true utility for `player`.

    Fix: the tuple-parameter lambda ``lambda ((a, s)): ...`` was removed
    in Python 3 (PEP 3113) and is a SyntaxError; the pair is now indexed
    explicitly.
    """
    player = game.to_move(state)

    def max_value(state, alpha, beta, depth):
        if cutoff_test(state, depth):
            return eval_fn(state)
        v = -infinity
        succ = game.successors(state)
        for (a, s) in succ:
            v = max(v, min_value(s, alpha, beta, depth + 1))
            if v >= beta:
                succ.close()  # prune: stop generating further successors
                return v
            alpha = max(alpha, v)
        return v

    def min_value(state, alpha, beta, depth):
        if cutoff_test(state, depth):
            return eval_fn(state)
        v = infinity
        succ = game.successors(state)
        for (a, s) in succ:
            v = min(v, max_value(s, alpha, beta, depth + 1))
            if v <= alpha:
                succ.close()  # prune: stop generating further successors
                return v
            beta = min(beta, v)
        return v

    # Body of alphabeta_search starts here:
    # The default test cuts off at depth d or at a terminal state.
    cutoff_test = (cutoff_test or
                   (lambda state, depth: depth > d or game.terminal_test(state)))
    eval_fn = eval_fn or (lambda state: game.utility(player, state))
    action, state = argmax_random_tie(
        game.successors(state),
        lambda a_s: min_value(a_s[1], -infinity, infinity, 0))
    return action
def decision_tree_learning(examples, attributes, parent_examples, classes_list):
    """Recursively build a decision tree (AIMA decision-tree learning).

    Args:
        examples: list of (example_dict, class_label) pairs.
        attributes: dict mapping attribute name -> list of possible values.
        parent_examples: the parent node's examples, used for the
            plurality value when `examples` is empty.
        classes_list: the possible class labels.

    Returns:
        A Tree.

    Fix: ``lambda ((a)): ...`` is a SyntaxError in Python 3 (PEP 3113);
    replaced with a plain one-argument lambda. Dead commented-out code
    removed.
    """
    # is_same returns (all-same?, the common classification if any).
    sameclass, classification = is_same(examples)
    if len(examples) == 0:
        return Tree(plurality_value(parent_examples))
    elif sameclass:
        return Tree(classification)
    elif len(attributes) == 0:
        return Tree(plurality_value(examples))
    else:
        # Choose the attribute with the highest importance; ties are
        # broken at random.
        attributename = argmax_random_tie(
            attributes.keys(),
            lambda a: importance(a, examples, attributes, classes_list))
        tree = Tree(attributename)
        for vk in attributes[attributename]:
            # Examples whose value for the chosen attribute equals vk.
            exs = [example for example in examples
                   if example[0][attributename] == vk]
            # Recurse without the attribute just used for the split.
            newattributes = remove_dict_entry(attributename, attributes)
            subtree = decision_tree_learning(exs, newattributes, examples,
                                             classes_list)
            # The branch label is the attribute value itself.
            tree.add_branch(str(vk), subtree)
        return tree
def choose_attribute(attrs, examples):
    """Select the attribute with the highest information gain over
    *examples*, breaking ties at random."""
    def gain(attribute):
        return information_gain(attribute, examples)
    return argmax_random_tie(attrs, key=gain)
def choose_attribute(attrs, examples):
    """Return whichever attribute yields the greatest information gain,
    choosing randomly among equally good candidates."""
    return argmax_random_tie(
        attrs, key=lambda candidate: information_gain(candidate, examples))
def plurality_value(examples):
    """Return the most popular target value for this set of examples.
    (Majority for a binary target, plurality otherwise.)"""
    def frequency(v):
        return count(target, v, examples)
    # `values`, `target`, `count`, and `DecisionLeaf` come from the
    # enclosing scope.
    return DecisionLeaf(argmax_random_tie(values[target], key=frequency))
def plurality_value(examples):
    """Pick the target value occurring most often in *examples* (ties
    broken at random) and wrap it in a DecisionLeaf."""
    winner = argmax_random_tie(values[target],
                               key=lambda v: count(target, v, examples))
    return DecisionLeaf(winner)
def arbitrate(self):
    """Activate the evaluator reporting the highest desirability."""
    best = argmax_random_tie(self.evaluators,
                             lambda evaluator: evaluator.calculate_desirability())
    self.most_desirable = best
    self.most_desirable.set_goal()
def arbitrate(self):
    """Select the evaluator with the greatest desirability and set its goal."""
    def desirability(evaluator):
        return evaluator.calculate_desirability()
    self.most_desirable = argmax_random_tie(self.evaluators, desirability)
    self.most_desirable.set_goal()
def plurality_value(self, examples):
    """Return the most popular target value for this set of examples."""
    def occurrences(v):
        return self.count(self.target, v, examples)
    return argmax_random_tie(self.values[self.target], key=occurrences)