def squeeze_cluster(self, cluster_cells, cluster_id, max_moves=15):
    bboard = np.zeros((60, 60), dtype=bool)
    for cluster_id1 in cluster_cells:
        for x, y in cluster_cells[cluster_id1]:
            if bboard[y][x]:
                raise Exception("overlap")
            bboard[y][x] = True
    ext_set = self.__get_exterior_set(cluster_id, cluster_cells, bboard,
                                      max_dist=1, search_all=True)
    ext_set = list(ext_set)
    ext_set.sort(
        key=lambda pos: manhattan_distance(pos, self.center_of_board))
    own_cells = list(cluster_cells[cluster_id])
    own_cells.sort(
        key=lambda pos: manhattan_distance(pos, self.center_of_board),
        reverse=True)
    num_moves = 0
    while len(ext_set) > 0 and len(own_cells) > 0:
        if num_moves > max_moves:
            break
        num_moves += 1
        new_cell = ext_set.pop(0)
        assert (not bboard[new_cell[1]][new_cell[0]])
        old_cell = own_cells.pop(0)
        if manhattan_distance(new_cell, self.center_of_board) > \
                manhattan_distance(old_cell, self.center_of_board):
            # no need to proceed
            break
        cluster_cells[cluster_id].remove(old_cell)
        cluster_cells[cluster_id].add(new_cell)
    return num_moves
def evaluation_function(self, current_game_state, action):
    """
    Design a better evaluation function here.

    The evaluation function takes in the current and proposed successor
    GameStates (pacman.py) and returns a number, where higher numbers are
    better.

    The code below extracts some useful information from the state, like
    the remaining food (new_food) and Pacman position after moving
    (new_pos). new_scared_times holds the number of moves that each ghost
    will remain scared because of Pacman having eaten a power pellet.

    Print out these variables to see what you're getting, then combine
    them to create a masterful evaluation function.
    """
    # Useful information you can extract from a GameState (pacman.py)
    successor_game_state = current_game_state.generate_pacman_successor(
        action)
    new_pos = successor_game_state.get_pacman_position()
    new_food = successor_game_state.get_food()

    "*** YOUR CODE HERE ***"
    # Variable definitions for the current food, an alternate score,
    # distances to food and ghost, and the number of food in the
    # successor state
    current_food = current_game_state.get_food()
    alternate_score = 0
    food_distance = 99999
    ghost_distance = 99999
    food_successor = len(new_food.as_list())

    # Find the closest food and ghost values
    for food in new_food.as_list():
        distance = manhattan_distance(food, new_pos)
        food_distance = min([food_distance, distance])
    for ghost in successor_game_state.get_ghost_positions():
        distance = manhattan_distance(new_pos, ghost)
        ghost_distance = min([ghost_distance, distance])

    # If there is food (in the current state) at the new position, add to
    # the alternate score
    pos_x, pos_y = new_pos
    if current_food[pos_x][pos_y]:
        alternate_score += 10

    # Score is the reciprocal of the distance to the closest food, minus
    # the number of remaining food, plus the alternate score
    score = 1.0 / food_distance - food_successor + alternate_score

    # If a ghost is within 2 spaces, subtract 600 from the score; this
    # should essentially negate any positive score when a ghost is near
    if ghost_distance < 2:
        score -= 600
    return score
def evaluation_function(self, current_game_state, action):
    """Return evaluation (number) based on game state and proposed action.

    *** Design a better evaluation function here. ***

    The evaluation function takes in the current and proposed successor
    GameStates (pacman.py) and returns a number, where higher numbers are
    better.

    The code below extracts some useful information from the state, like
    the remaining food (new_food) and Pacman position after moving
    (new_pos). new_scared_times holds the number of moves that each ghost
    will remain scared because of Pacman having eaten a power pellet.

    Print out these variables to see what you're getting, then combine
    them to create a masterful evaluation function.
    """
    # Useful information you can extract from a GameState (pacman.py)
    successor_game_state = \
        current_game_state.generate_pacman_successor(action)
    new_pos = successor_game_state.get_pacman_position()
    new_food = successor_game_state.get_food()
    new_ghost_states = successor_game_state.get_ghost_states()

    "*** YOUR CODE HERE ***"
    # It's the goal, just take it!
    if successor_game_state.is_win():
        return float('inf')

    # Base utility
    utility = successor_game_state.get_score()

    # Check how close a ghost is and penalize accordingly; the very-close
    # case must be tested first, otherwise it is unreachable
    for ghost_state in new_ghost_states:
        if ghost_state.scared_timer == 0:
            pos = ghost_state.get_position()
            distance_to_ghost = util.manhattan_distance(pos, new_pos)
            if distance_to_ghost <= 2:
                utility -= 100
            elif distance_to_ghost <= 10:
                utility -= 10 - distance_to_ghost

    # If the next game state has you eating a food, try and go there
    current_num_food = current_game_state.get_num_food()
    next_num_food = successor_game_state.get_num_food()
    if current_num_food > next_num_food:
        utility += 25

    # Try and go closer to the nearest food
    utility -= min([util.manhattan_distance(new_pos, pos)
                    for pos in [(i, j)
                                for i, lst in enumerate(new_food)
                                for j, val in enumerate(lst) if val]])
    return utility
def better_evaluation_function(current_game_state):
    """Your awesome evaluation function (question 5).

    Description: This function works off the simpler evaluation function
    above; its primary difference is that it works off of the current game
    state, also evaluates power pellets, and makes some small changes to
    the math when evaluating ghosts and food.
    """
    # Set base utility to current score
    utility = current_game_state.get_score()

    # Return max utility if you will win
    if current_game_state.is_win():
        return float('inf')

    # Get Pacman's current position
    current_position = current_game_state.get_pacman_position()

    # Try and go closer to the nearest food
    food_dist = [util.manhattan_distance(current_position, pos)
                 for pos in [(i, j)
                             for i, lst in enumerate(
                                 current_game_state.get_food())
                             for j, val in enumerate(lst) if val]]
    utility -= min(food_dist) / len(food_dist)

    # Get distance to each ghost that is not scared
    dist = [util.manhattan_distance(current_position, ghost.get_position())
            for ghost in current_game_state.get_ghost_states()
            if ghost.scared_timer == 0]

    # Try and stay away from all ghosts
    for d in dist:
        if d < 5:
            utility -= (10 - d)
        if d <= 1:
            utility -= 300

    # Try and go closer to power capsules
    dist_capsules = [util.manhattan_distance(current_position, capsule)
                     for capsule in current_game_state.get_capsules()]
    for c_dist in dist_capsules:
        utility -= c_dist

    # Return utility
    return utility
def make_observation(self, index):
    state = self.deep_copy()
    # Adds the sonar signal
    pos = state.get_agent_position(index)
    n = state.get_num_agents()
    distances = [noisy_distance(pos, state.get_agent_position(i))
                 for i in range(n)]
    state.agent_distances = distances
    # Remove states of distant opponents
    if index in self.blue_team:
        team = self.blue_team
        other_team = self.red_team
    else:
        other_team = self.blue_team
        team = self.red_team
    for enemy in other_team:
        seen = False
        enemy_pos = state.get_agent_position(enemy)
        for teammate in team:
            if util.manhattan_distance(
                    enemy_pos,
                    state.get_agent_position(teammate)) <= SIGHT_RANGE:
                seen = True
        if not seen:
            state.data.agent_states[enemy].configuration = None
    return state
def enhanced_pacman_features(state, action):
    """
    For each state, this function is called with each legal action.
    It should return a counter with { <feature name> : <feature value>, ... }
    """
    features = util.Counter()
    "*** YOUR CODE HERE ***"
    successor_game_state = state.generate_pacman_successor(action)
    new_pos = successor_game_state.get_pacman_position()
    new_food = successor_game_state.get_food()
    new_ghost_states = successor_game_state.get_ghost_states()
    new_scared_times = [
        ghost_state.scared_timer for ghost_state in new_ghost_states
    ]

    if 'Stop' in action:
        features[action, 'val'] = -0.1
        return features

    for ghost_state in new_ghost_states:
        ghost_pos = ghost_state.get_position()
        if ghost_pos == new_pos and ghost_state.scared_timer == 0:
            features[action, 'val'] = -100
            return features

    food_locations = state.get_food().as_list()
    for food in food_locations:
        dis = [util.manhattan_distance(food, new_pos)]
        k = (1.0 / (max(dis) + 0.1))
        features[action, 'val'] = k**3

    return features
def killed_opponent(self, game_state, index):
    for opponent_index in self.opponent_indices:
        if (self.opponent_positions[opponent_index] ==
                game_state.get_initial_agent_position(opponent_index)):
            return True
        elif self.opponent_previous_positions[opponent_index] is not None:
            if (self.opponent_positions[opponent_index] is None and
                    util.manhattan_distance(
                        game_state.get_agent_position(index),
                        self.opponent_previous_positions[opponent_index])
                    < 2):
                return True
    return False
def better_evaluation_function(current_game_state):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: The evaluation function considers the current score of a
    state and factors in the distance to a ghost, an edible ghost, and the
    closest food dot. If an edible ghost is nearby and is scared for long
    enough to reach, then going for that ghost is greatly incentivized, as
    it adds a lot of points. If any ghost is nearby, it disincentivizes
    that state, tempered by the distance to the ghost. The function also
    checks the closest food pellet: the closer the closest food pellet,
    the larger the incentive. These incentives for ghosts, edible ghosts
    and the closest food dot are all summed with the score, and that value
    is returned.
    """
    "*** YOUR CODE HERE ***"
    pos = current_game_state.get_pacman_position()
    food = current_game_state.get_food()
    ghost_states = current_game_state.get_ghost_states()

    food_weight = 1.0
    ghost_weight = 1.0
    edible_ghost_weight = 7.0

    # because the score at each state matters
    value = current_game_state.get_score()

    # distance to ghosts
    ghost_value = 0
    for ghost in ghost_states:
        distance_to_ghost = manhattan_distance(pos, ghost.get_position())
        if distance_to_ghost > 0:
            if ghost.scared_timer > distance_to_ghost:
                # if the ghost is scared and close enough to reach, eat it
                ghost_value += edible_ghost_weight / distance_to_ghost
            else:
                # else avoid the ghost
                ghost_value -= ghost_weight / distance_to_ghost
    value += ghost_value

    # distance to closest food
    distances_to_food = [manhattan_distance(pos, x) for x in food.as_list()]
    if len(distances_to_food):  # if food is found
        value += food_weight / min(distances_to_food)

    return value
def how_far(directions):
    heading = np.array([0, 1])
    position = np.array([0, 0])
    for (turn, dist) in parse(directions):
        if turn == 'L':
            heading = rotatePositive90(heading)
        elif turn == 'R':
            heading = rotateNegative90(heading)
        position += heading * dist
    return util.manhattan_distance((0, 0), position.tolist())
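# Illustration (not from the original source): a self-contained sketch of the
# same heading-rotation walk as how_far above, with the parsing and the
# 90-degree rotation helpers inlined. It assumes "R2, L3"-style input and that
# rotatePositive90 / rotateNegative90 are the usual +90 / -90 degree rotations.
def how_far_sketch(directions):
    heading = (0, 1)                       # start facing "north"
    x, y = 0, 0
    for step in directions.split(", "):
        turn, dist = step[0], int(step[1:])
        hx, hy = heading
        # a left turn rotates the heading +90 degrees, a right turn -90 degrees
        heading = (-hy, hx) if turn == 'L' else (hy, -hx)
        x, y = x + heading[0] * dist, y + heading[1] * dist
    return abs(x) + abs(y)                 # Manhattan distance from the origin

assert how_far_sketch("R2, L3") == 5       # 2 blocks east, then 3 blocks north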
def manhattan_dist_sum(p, tiles=None):
    if not tiles:
        tiles = p.flat
    solved = puzzle.solved(p)
    s = 0
    for t in tiles:
        s_pos = puzzle.get_position(p, t)
        t_pos = puzzle.get_position(solved, t)
        s += u.manhattan_distance(*s_pos, *t_pos)
    return s
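# Illustration (not from the original source): a standalone version of the
# summed-Manhattan-distance sliding-puzzle heuristic, using a hypothetical
# flat-tuple board where 0 marks the blank; unlike the function above it skips
# the blank tile explicitly.
def manhattan_dist_sum_sketch(board, width=3):
    goal = tuple(range(1, width * width)) + (0,)   # 1..8 followed by the blank
    total = 0
    for tile in board:
        if tile == 0:
            continue                                # the blank does not count
        i, j = board.index(tile), goal.index(tile)
        total += abs(i // width - j // width) + abs(i % width - j % width)
    return total

# One move away from the goal: only tile 8 is displaced, by one column.
assert manhattan_dist_sum_sketch((1, 2, 3, 4, 5, 6, 7, 0, 8)) == 1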
def observe(self, observation, game_state, observer):
    """Update beliefs based on a distance observation and game_state.

    The noisy_distance is the estimated Manhattan distance to the opponent
    being tracked.

    Overrides InferenceModule.observe
    """
    if len({key: value
            for key, value in self.beliefs.items() if value > 0}) == 0:
        # THIS SHOULD NOT HAPPEN
        print("***************ALERT: ALL ZEROS********************",
              self.index)
        # but just in case, we will assume it was because the opponent was
        # eaten and we somehow missed that
        self.observe_exact(self.get_initial_position(game_state),
                           game_state)
        return

    if len(self.possibly_eaten_by) > 0:
        eaten = True
        for index, position in self.possibly_eaten_by:
            # if in starting position then we were the one eaten,
            # unless we were already there (in which case, if we can't see
            # the opponent exactly, they were eaten)
            if ((game_state.get_agent_position(index) ==
                 game_state.get_initial_agent_position(index)) and
                    (position != game_state.get_agent_position(index))):
                # WE WERE EATEN
                eaten = False
                break
        if eaten:
            # THE OPPONENT WE ARE TRACKING WAS EATEN, SO THEY RETURNED
            # TO STARTING POSITION
            self.observe_exact(self.get_initial_position(game_state),
                               game_state)
            return
        # reset this data structure
        self.possibly_eaten_by = []

    noisy_distance = observation
    observer_position = game_state.get_agent_position(observer)
    all_possible = util.Counter()
    for p in [p for p in self.legal_positions if self.beliefs[p] > 0]:
        true_distance = util.manhattan_distance(p, observer_position)
        all_possible[p] = game_state.get_distance_prob(
            true_distance, noisy_distance) * self.beliefs[p]
    all_possible.normalize()
    self.beliefs = all_possible
def better_evaluation_function(current_game_state):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: This evaluation function works by manipulating the values
    of the distances to the food pellets; it basically evaluates the score
    based on how close Pacman is to food pellets and the number of pellets
    left. This evaluation function passes the tests, but it certainly does
    not do a good job of evaluating all possible variables, because it
    does not take into account ghosts, power pellets, whether ghosts are
    scared, and so on. So in other circumstances I do not think this
    evaluation function will perform well, but it performs well enough in
    this small classic test against one ghost.
    """
    "*** YOUR CODE HERE ***"
    # Variable definitions for all valuable pieces of information:
    # current Pacman position and food, and a list of distances to food
    current_pos = current_game_state.get_pacman_position()
    current_food = current_game_state.get_food()
    old_score = current_game_state.get_score()
    food_distance = []
    score = 0

    # List of all Manhattan distances to food
    for food in current_food.as_list():
        food_distance.append(manhattan_distance(food, current_pos))

    # Current food state is 50 times the number of food;
    # maximum and minimum distances to a food in the food distance list
    current_food_state = 50 * len(current_food.as_list())
    max_food_dist = max(food_distance + [0.0])
    min_food_dist = min(food_distance + [1000.0])

    # Score is the current game score, minus the maximum and minimum food
    # distances, plus the reciprocal of the sum of all food distances,
    # minus 50 times the current number of food, plus the reciprocal of
    # the minimum value in the food distance list
    score = (old_score - max_food_dist - min_food_dist +
             1.0 / sum(food_distance + [0.001]) - current_food_state +
             1.0 / min(food_distance + [float("inf")]))
    return score
def check_distance(pos_a, pos_b):
    """Get the manhattan distance between any two points."""
    if (pos_a, pos_b) in DistanceManager.distances:
        return DistanceManager.distances[(pos_a, pos_b)]
    elif (pos_b, pos_a) in DistanceManager.distances:
        return DistanceManager.distances[(pos_b, pos_a)]
    else:
        distance = manhattan_distance(pos_a, pos_b)
        DistanceManager.distances[(pos_a, pos_b)] = distance
        return distance
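# Illustration (not from the original source): the same symmetric caching idea
# with a frozenset key, so (a, b) and (b, a) share one cache entry. The names
# here are hypothetical and not part of DistanceManager.
_distance_cache = {}

def cached_manhattan(pos_a, pos_b):
    key = frozenset((pos_a, pos_b))        # order-insensitive cache key
    if key not in _distance_cache:
        _distance_cache[key] = (abs(pos_a[0] - pos_b[0]) +
                                abs(pos_a[1] - pos_b[1]))
    return _distance_cache[key]

assert cached_manhattan((1, 2), (4, 6)) == 7
assert cached_manhattan((4, 6), (1, 2)) == 7   # served from the same entry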
def zigzag(width, height, c_index):
    # https://rosettacode.org/wiki/Zig-zag_matrix#Python
    # modified by Keyi
    corner_entries = [(0, 0), (width - 1, 0), (width - 1, height - 1),
                      (0, height - 1)]
    corner = corner_entries[c_index]
    index_order = sorted(
        ((x, y) for x in range(width) for y in range(height)),
        key=lambda p: (manhattan_distance(p, corner)))
    result = {}
    for n, index in enumerate(index_order):
        result[n] = index
    return result
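# Illustration (not from the original source): despite its name, zigzag ranks
# cells purely by Manhattan distance from the chosen corner, with ties kept in
# generation (x-major) order. Assuming manhattan_distance is the usual L1
# metric, a 2x2 grid with c_index == 0 orders as follows.
def _l1(p, q):                             # stand-in for manhattan_distance
    return abs(p[0] - q[0]) + abs(p[1] - q[1])

_order = sorted(((x, y) for x in range(2) for y in range(2)),
                key=lambda p: _l1(p, (0, 0)))
assert dict(enumerate(_order)) == {0: (0, 0), 1: (0, 1), 2: (1, 0), 3: (1, 1)}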
def averageCost(data, costF_idx, medoids_idx, cacheOn=False):
    '''
    Compute the average cost of medoids based on certain cost function
    and do the clustering
    '''
    # Init the cluster
    size = len(data)
    total_cost = {}
    medoids = {}
    for idx in medoids_idx:
        medoids[idx] = []
        total_cost[idx] = 0.0

    # Compute the distance and do the clustering
    for i in range(size):
        choice = -1
        # Make a big number
        min_cost = float('inf')
        for m in medoids:
            if cacheOn == True:
                # Check for cache
                tmp = distances_cache.get((m, i), None)
            if cacheOn == False or tmp == None:
                if costF_idx == 0:
                    # euclidean_distance
                    tmp = euclidean_distance(data[m], data[i])
                elif costF_idx == 1:
                    # manhattan_distance
                    tmp = manhattan_distance(data[m], data[i])
                elif costF_idx == 2:
                    # pearson_distance
                    tmp = pearson_distance(data[m], data[i])
                else:
                    print('Error: unknown cost function idx: %d' % (costF_idx))
            if cacheOn == True:
                # Save the distance for acceleration
                distances_cache[(m, i)] = tmp
            # Clustering
            if tmp < min_cost:
                choice = m
                min_cost = tmp
        # Done the clustering
        medoids[choice].append(i)
        total_cost[choice] += min_cost

    # Compute the average cost
    avg_cost = 0.0
    for idx in medoids_idx:
        avg_cost += total_cost[idx] / len(medoids[idx])

    # Return the average cost and clustering
    return (avg_cost, medoids)
def assign_special_blocks(cluster, cluster_pos, bbox, board_layout,
                          used_spots):
    special_blks = {}
    cells = {}
    for blk_id in cluster:
        blk_type = blk_id[0]
        if blk_type != "p" and blk_type != "r" and blk_type != "i":
            if blk_type not in special_blks:
                special_blks[blk_type] = 0
            special_blks[blk_type] += 1

    pos_x, pos_y = cluster_pos
    width, height = bbox
    centroid = pos_x + width // 2, pos_y + height // 2
    for x in range(pos_x, pos_x + width):
        for y in range(pos_y, pos_y + height):
            blk_type = board_layout[y][x]
            pos = (x, y)
            if blk_type in special_blks and pos not in used_spots:
                # we found one
                if blk_type not in cells:
                    cells[blk_type] = set()
                cells[blk_type].add(pos)
                used_spots.add(pos)
                if special_blks[blk_type] > 0:
                    special_blks[blk_type] -= 1

    # here is the difficult part. if we still have blocks left to assign,
    # we need to do a brute-force search
    available_pos = {}
    for blk_type in special_blks:
        available_pos[blk_type] = []
    for y in range(len(board_layout)):
        for x in range(len(board_layout[y])):
            pos = (x, y)
            blk_type = board_layout[y][x]
            if pos not in used_spots and blk_type in special_blks:
                available_pos[blk_type].append(pos)
    for blk_type in special_blks:
        num_blocks = special_blks[blk_type]
        pos_list = available_pos[blk_type]
        if len(pos_list) < num_blocks:
            raise Exception("Not enough blocks left for type: " + blk_type)
        pos_list.sort(key=lambda p: manhattan_distance(p, centroid))
        for i in range(num_blocks):
            if blk_type not in cells:
                cells[blk_type] = set()
            cells[blk_type].add(pos_list[i])
            used_spots.add(pos_list[i])
    return cells
def apply_action(state, action, agent_index):
    """Edit the state to reflect the results of the action."""
    legal = AgentRules.get_legal_actions(state, agent_index)
    if action not in legal:
        raise Exception("Illegal action " + str(action))

    # Update Configuration
    agent_state = state.data.agent_states[agent_index]
    speed = 1.0
    # if agent_state.is_pacman: speed = 0.5
    vector = Actions.direction_to_vector(action, speed)
    old_config = agent_state.configuration
    agent_state.configuration = old_config.generate_successor(vector)

    # Eat
    next = agent_state.configuration.get_position()
    nearest = nearest_point(next)

    if next == nearest:
        is_red = state.is_on_red_team(agent_index)
        # Change agent type
        agent_state.is_pacman = \
            [is_red,
             state.is_red(agent_state.configuration)].count(True) == 1
        # if he's no longer pacman, he's on his own side,
        # so reset the num carrying timer
        # agent_state.num_carrying *= int(agent_state.is_pacman)
        if agent_state.num_carrying > 0 and not agent_state.is_pacman:
            score = (agent_state.num_carrying
                     if is_red else -1 * agent_state.num_carrying)
            state.data.score_change += score

            agent_state.num_returned += agent_state.num_carrying
            agent_state.num_carrying = 0

            red_count = 0
            blue_count = 0
            for index in range(state.get_num_agents()):
                agent_state = state.data.agent_states[index]
                if index in state.get_red_team_indices():
                    red_count += agent_state.num_returned
                else:
                    blue_count += agent_state.num_returned
            if ((red_count >= (TOTAL_FOOD / 2) - MIN_FOOD) or
                    (blue_count >= (TOTAL_FOOD / 2) - MIN_FOOD)):
                state.data._win = True

    if agent_state.is_pacman and manhattan_distance(nearest, next) <= 0.9:
        AgentRules.consume(nearest, state,
                           state.is_on_red_team(agent_index))
def enhanced_pacman_features(state, action):
    """
    For each state, this function is called with each legal action.
    It should return a counter with { <feature name> : <feature value>, ... }
    """
    features = util.Counter()
    "*** YOUR CODE HERE ***"
    successor = state.generate_successor(0, action)

    ghost_count = state.get_num_agents() - 1
    # ghost agent indices run from 1 to ghost_count inclusive
    for g in range(1, ghost_count + 1):
        feature_base_name = "ghost_" + str(g) + "_"
        ghost_distance = util.manhattan_distance(
            successor.get_pacman_position(),
            successor.get_ghost_position(g))
        ghost_is_scared = successor.get_ghost_state(g).scared_timer > 0
        features[feature_base_name + "is_scared"] = int(ghost_is_scared)
        if ghost_is_scared:
            features["scared_" + feature_base_name + "distance"] = \
                ghost_distance
            features[feature_base_name + "distance"] = 0
        else:
            features["scared_" + feature_base_name + "distance"] = 0
            features[feature_base_name + "distance"] = ghost_distance

    capsules = successor.get_capsules()
    min_cap_distance = None
    if len(capsules) > 0:
        for cap in capsules:
            cap_distance = util.manhattan_distance(
                successor.get_pacman_position(), cap)
            if not min_cap_distance or min_cap_distance > cap_distance:
                min_cap_distance = cap_distance
    else:
        min_cap_distance = 0

    features["capsules_left"] = len(capsules)
    features["nearest_capsule"] = min_cap_distance
    features["score"] = successor.get_score()
    return features
def main(input_file):
    wire_paths = []
    with open(input_file) as f:
        while True:
            line = f.readline().strip()
            if not line:
                break
            path = line.split(',')
            wire_paths.append(path)
    logging.debug(wire_paths)

    origin = (0, 0)

    # convert the first wire to a list of line segments
    # Segments are (start_point, end_point) tuples
    wire1_segments = wire_to_segments(origin, wire_paths[0])
    logging.debug("Wire1 Segments: " + str(wire1_segments))

    # Walk along the second wire, finding all intersection_points
    intersection_points = []
    last_point = origin
    for instruction in wire_paths[1]:
        direction, count = split_instruction(instruction)
        for _ in range(count):
            new_point = (last_point[0] + direction[0],
                         last_point[1] + direction[1])
            # Check if new point is inside any of the line segments of wire 1
            for segment in wire1_segments:
                if point_in_segment(new_point, segment):
                    intersection_points.append(new_point)
                    break
                logging.debug("Point {} did not intersect".format(new_point))
            last_point = new_point
    logging.debug("Intersection Points: " + str(intersection_points))

    min_point = None
    min_distance = math.inf
    for intersection in intersection_points:
        dist = util.manhattan_distance(origin, intersection)
        if dist < min_distance:
            min_point = intersection
            min_distance = dist

    print(f"Minimal Distance To Intersection: {min_distance}")
def is_goal_state(self, state):
    """Return True if and only if the state is a valid goal state.

    The state is Pacman's position.

    Overrides PositionSearchProblem.is_goal_test

    Fill this in with a goal test that will complete the problem
    definition.
    """
    distances = [(util.manhattan_distance(state, food), food)
                 for food in self.food.as_list()]
    distance, food = min(distances)
    if state == food:
        return True
    return False
def min_distance(coordinate, items):
    '''
    Finds which item in the list of items is closest to (x, y), according
    to manhattan distance. Then returns the distance between the closest
    item and (x, y).

    Args:
        coordinate (Coordinate): (x, y) coordinate of the point
        items (list): List of coordinates

    Returns:
        An integer representing the distance between the closest item and
        the coordinate.
    '''
    if not items:
        return Grid.MAX_DISTANCE
    return min(util.manhattan_distance(coordinate, item) for item in items)
def main(input_file):
    wire_paths = []
    with open(input_file) as f:
        while True:
            line = f.readline().strip()
            if not line:
                break
            path = line.split(',')
            wire_paths.append(path)
    logging.debug(wire_paths)

    origin = (0, 0)

    # convert each wire to a list of line segments
    # Segments are (start_point, end_point) tuples
    wire1_segments = wire_to_segments(origin, wire_paths[0])
    logging.debug("Wire1 Segments: " + str(wire1_segments))
    wire2_segments = wire_to_segments(origin, wire_paths[1])
    logging.debug("Wire2 Segments: " + str(wire2_segments))

    intersection_points = []
    for segment_a in wire1_segments:
        for segment_b in wire2_segments:
            # Determine if segment a intersects with segment b
            if segments_intersect(segment_a, segment_b):
                logging.debug(
                    f"Segment Intersection: {segment_a} and {segment_b}")
                intersection_points.extend(
                    segment_intersect_points(segment_a, segment_b))
    logging.debug("Intersection Points: " + str(intersection_points))

    min_point = None
    min_distance = math.inf
    for intersection in intersection_points:
        if intersection == origin:
            continue
        dist = util.manhattan_distance(origin, intersection)
        if dist < min_distance:
            min_point = intersection
            min_distance = dist

    print(f"Minimal Distance To Intersection: {min_distance}")
def get_distribution(self, state):
    """Read variables from state."""
    ghost_state = state.get_ghost_state(self.index)
    legal_actions = state.get_legal_actions(self.index)
    pos = state.get_ghost_position(self.index)
    is_scared = ghost_state.scared_timer > 0

    speed = 1
    if is_scared:
        speed = 0.5

    action_vectors = [Actions.direction_to_vector(a, speed)
                      for a in legal_actions]
    new_positions = [(pos[0] + a[0], pos[1] + a[1]) for a in action_vectors]
    pacman_position = state.get_pacman_position()

    # Select best actions given the state
    distances_to_pacman = [manhattan_distance(pos, pacman_position)
                           for pos in new_positions]
    if is_scared:
        best_score = max(distances_to_pacman)
        best_prob = self.prob_scared_flee
    else:
        best_score = min(distances_to_pacman)
        best_prob = self.prob_attack
    best_actions = [action
                    for action, distance in zip(legal_actions,
                                                distances_to_pacman)
                    if distance == best_score]

    # Construct distribution
    dist = util.Counter()
    for a in best_actions:
        dist[a] = best_prob / len(best_actions)
    for a in legal_actions:
        dist[a] += (1 - best_prob) / len(legal_actions)
    dist.normalize()
    return dist
def apply_action(state, action):
    """
    Edits the state to reflect the results of the action.
    """
    legal = PacmanRules.get_legal_actions(state)
    if action not in legal:
        raise Exception("Illegal action " + str(action))

    pacman_state = state.data.agent_states[0]

    # Update Configuration
    vector = Actions.direction_to_vector(action, PacmanRules.PACMAN_SPEED)
    pacman_state.configuration = \
        pacman_state.configuration.generate_successor(vector)

    # Eat
    next = pacman_state.configuration.get_position()
    nearest = nearest_point(next)
    if manhattan_distance(nearest, next) <= 0.5:
        # Remove food
        PacmanRules.consume(nearest, state)
def corners_heuristic(state, problem):
    """Greedy chain of Manhattan distances through the remaining corners.

    Args:
        state: The current search state (a data structure you chose in
            your search problem)
        problem: The CornersProblem instance for this layout.

    This function should always return a number that is a lower bound on
    the shortest path from the state to a goal of the problem; i.e. it
    should be admissible (as well as consistent).
    """
    # some variables you may want to use:
    # problem.corners -- these are the corner coordinates
    # problem.walls -- these are the walls of the maze, as a Grid (game.py)
    heuristic = 0
    pos = copy.deepcopy(state[0])
    corners = list(copy.deepcopy(state[1]))
    while corners:
        # get distance to each corner
        distances = [util.manhattan_distance(pos, cur) for cur in corners]
        # determine the shortest route
        min_dist = min(distances)
        # add up the total cost
        heuristic = heuristic + min_dist
        # establish where we are starting from next
        pos = corners[distances.index(min_dist)]
        # we've gotten this corner now, so we remove it
        corners.remove(pos)
    return heuristic
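# Illustration (not from the original source): a standalone walk-through of the
# greedy corner chaining used above. Starting at (1, 1) with the four corners
# of a 6x6 area unvisited, the heuristic repeatedly hops to the nearest
# remaining corner and sums the Manhattan distances.
def greedy_corner_chain(pos, corners):
    corners = list(corners)
    total = 0
    while corners:
        distances = [abs(pos[0] - c[0]) + abs(pos[1] - c[1]) for c in corners]
        nearest = corners[distances.index(min(distances))]
        total += min(distances)
        pos = nearest
        corners.remove(nearest)
    return total

# (1,1)->(0,0): 2, then (0,0)->(0,5): 5, (0,5)->(5,5): 5, (5,5)->(5,0): 5
assert greedy_corner_chain((1, 1), [(0, 0), (0, 5), (5, 0), (5, 5)]) == 17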
def deoverlap(self, cluster_cells, cluster_id, overlap_set):
    effort_count = 0
    old_overlap_set = len(overlap_set)
    while len(overlap_set) > 0 and effort_count < 5:
        # boolean board
        bboard = self.__get_bboard(cluster_cells, False)
        ext = self.__get_exterior_set(cluster_id, cluster_cells, bboard)
        ext_list = list(ext)
        ext_list.sort(
            key=lambda p: manhattan_distance(p, self.center_of_board))
        for ex in ext_list:
            if len(overlap_set) == 0:
                break
            cell = overlap_set.pop()
            cluster_cells[cluster_id].remove(cell)
            cluster_cells[cluster_id].add(ex)
        if len(overlap_set) == old_overlap_set:
            effort_count += 1
        else:
            effort_count = 0
        old_overlap_set = len(overlap_set)
    assert (len(cluster_cells[cluster_id]) == self.get_cluster_size(
        self.clusters[cluster_id]))
def get_paths_rec(self, current_path, end_x, end_y, steps, length):
    """
    Recursive function that finds all valid paths from (x, y) to
    (end_x, end_y).
    :return: List of paths. Path is a list of
             [(number, number_color), cell_color].
    """
    x, y = current_path[-1]
    # If end of steps
    if steps == 0:
        # And we got to the end, return the path found
        if current_path[-1] == (end_x, end_y):
            return [current_path]
        # Otherwise, the path doesn't lead to the end
        else:
            return []

    # If the end is too far for the path, or we got to a number (we
    # checked earlier and this is not the end point), don't continue the
    # search in this direction
    if manhattan_distance((x, y), (end_x, end_y)) > steps \
            or (self.get_number_in_cell(x, y) != 0 and length - 1 != steps):
        return []

    # Collect valid paths from this point
    paths = []
    possible_steps = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
    for possible_step in possible_steps:
        # If the point is not on the board or already in the path, skip it
        if possible_step[0] < 0 or self.get_height() <= possible_step[0] \
                or possible_step[1] < 0 \
                or self.get_width() <= possible_step[1] \
                or possible_step in current_path:
            continue
        paths += self.get_paths_rec(current_path + [possible_step],
                                    end_x, end_y, steps - 1, length)
    return paths
def get_features(state, action):
    features = dict()
    # get the s' (next state) to make calculations
    next_state = state.next_state(action)
    food = state.get_food()
    snake_head = next_state.get_snake_head()

    distance_to_food = util.manhattan_distance(snake_head, food)
    # scale distance_to_food to between 0 and 1 so that it will not
    # diverge with every update
    distance_to_food = float(distance_to_food) / (next_state.height *
                                                  next_state.width)
    features['distance_to_food'] = distance_to_food

    # "1 step away" means only the points the snake can reach after one
    # move; it therefore excludes diagonals and the back of the snake,
    # as it cannot move there
    from snake import Squares
    # features['#_of_walls_1_step_away'] = get_adjacent_count(next_state, Squares.Wall)

    tunnel_count, count = get_dead_end_count(next_state, Squares.Snake,
                                             Squares.Wall)
    # features['dead_end'] = 1 if tunnel_count == 2 or count == 3 else 0
    depth = 12
    features['new_dead_end'] = 1 if is_dead_end(state, action, depth) else 0
    features['is_end_game'] = 1 if next_state.check_end_game() else 0

    util.divide_all(features, 100.0)
    return features
def make_decision_by_nearest_position(self, cur_x, cur_y, cur_direction,
                                      target):
    '''
    Among the four neighboring cells of the current ship, find the one
    closest to the target and use it as the destination of the next move;
    return that next move.

    param:
        cur_x, cur_y: current x and y coordinates of the ship
        cur_direction: current heading of the ship
        target: coordinates of the target ship
    return:
        the next move, a BoardAction object
    '''
    # The four neighboring cells of the current ship, keyed by heading
    neighbors = {
        90: (cur_x, cur_y + 1),
        180: (cur_x + 1, cur_y),
        270: (cur_x, cur_y - 1),
        0: (cur_x - 1, cur_y)
    }
    # Candidate strategies to choose from
    strategy_options = [{
        'distance': manhattan_distance(target, neighbor),
        'next_move': neighbor,
        'angular_to_be': angular
    } for angular, neighbor in neighbors.items()]
    # Find the cell closest to the target
    nearest_strategies = sorted(strategy_options,
                                key=lambda d: d['distance'])
    for strategy in nearest_strategies:
        if self.is_decision_legal(*strategy['next_move']):
            return BoardAction(
                stay=False,
                clockwise=(strategy['angular_to_be'] - cur_direction) > 0,
                angular_speed=abs(strategy['angular_to_be'] -
                                  cur_direction))
def targetFunction(data, costF_idx, medoids_idx, cacheOn=False, distDict={},
                   simDict={}, affinities={}, costType=CostType,
                   namedPoints=True):
    '''
    Compute the average cost of medoids based on certain cost function
    and do the clustering given the medoids
    '''
    if costType not in ["total", "average", "modularity"]:
        print "unknown target function - check the global variables in the code"
        return (1)

    # Init the cluster
    size = len(data)
    total_cost = {}
    medoids = {}
    for idx in medoids_idx:
        medoids[idx] = []
        total_cost[idx] = 0.0
    assignErrors = []

    # Compute the distance and do the clustering
    for i in range(size):
        choice = -1
        # Make a big number
        min_cost = float('inf')
        # medoids themselves are also included into resulting cluster lists
        for m in medoids:
            if cacheOn == True:
                # Check for cache
                tmp = distances_cache.get((m, i), None)
            if cacheOn == False or tmp == None:
                if costF_idx == 0:
                    # euclidean_distance
                    tmp = euclidean_distance(data[m], data[i])
                elif costF_idx == 1:
                    # manhattan_distance
                    tmp = manhattan_distance(data[m], data[i])
                elif costF_idx == 2:
                    # pearson_distance
                    tmp = pearson_distance(data[m], data[i])
                elif costF_idx == 3:
                    # direct_distance
                    tmp = direct_distance(data[m], data[i], distDict)
                elif costF_idx == 4:
                    # similarity_distance
                    tmp = similarity_distance(data[m], data[i], simDict)
                else:
                    print('Error: unknown cost function idx: %d' % (costF_idx))
            if cacheOn == True:
                # Save the distance for acceleration
                distances_cache[(m, i)] = tmp
            # Clustering
            # Randomization for nodes/points isolated from all the medoids
            # in order to assign them to random clusters. Hope averaging will
            # be able to glean cases for which some medoids did appear in the
            # same connected component, and group those nodes together.
            if tmp == 0.0 and min_cost == 0.0:
                # no connection to either medoid
                rv = bernoulli.rvs(1. / len(medoids_idx), size=1)
                if rv[0] == 1.:
                    choice = m
            elif tmp < min_cost:
                # if tmp < min_cost:
                choice = m
                min_cost = tmp
        # Done the clustering
        if choice == -1:
            print "ERROR: the node cannot be assigned"
            assignErrors.append(i)
        else:
            medoids[choice].append(i)
            total_cost[choice] += min_cost

    # Compute the target function
    if costType == "total":
        # print total_cost
        return (sum(total_cost.values()), medoids)
    elif costType == "average":
        # Compute the average cost
        avg_cost = 0.0
        for idx in medoids_idx:
            avg_cost += total_cost[idx] / len(medoids[idx])
        # Return the average cost and clustering
        return (avg_cost, medoids)
    elif costType == "modularity":
        # If the points are named, display the names
        if namedPoints == True:
            named_medoids = {}
            for medID in medoids_idx:
                named_medoids[data[medID]] = []
                for pointID in medoids[medID]:
                    named_medoids[data[medID]].append(data[pointID])
            # "-" because we maximize modularity
            mod = -modularity(data, COST=costF_idx, distDict=distDict,
                              edgeDict=affinities, medoids=named_medoids)
        else:
            mod = -modularity(data, COST=costF_idx, distDict=distDict,
                              edgeDict=affinities, medoids=medoids)
        print "modularity computed"
    else:
        print "unknown target function"
        return (1)

    if len(assignErrors) > 0:
        print "unassigned nodes: ", assignErrors
    else:
        print "no unassigned nodes, all right"

    return (mod, medoids)
def totalCost(data, costF_idx, medoids_idx, cacheOn=CacheOn, distDict={},
              simDict={}, acceleration=0):
    '''
    Compute the total cost and do the clustering based on certain cost
    function (that is, assign each data point to certain cluster given
    the medoids)
    '''
    # Init the cluster
    size = len(data)
    total_cost = 0.0
    medoids = {}
    for idx in medoids_idx:
        medoids[idx] = []
    # medoids['unassigned'] = []
    unassigned = []
    tmp = None

    # Compute the distance and do the clustering
    for i in xrange(size):
        choice = -1
        # Make a big number
        min_cost = float('inf')
        for m in medoids:
            if cacheOn == True:
                # Check for cache
                tmp = distances_cache.get((m, i), None)
            if cacheOn == False or tmp == None:
                if costF_idx == 0:
                    # euclidean_distance
                    tmp = euclidean_distance(data[m], data[i])
                elif costF_idx == 1:
                    # manhattan_distance
                    tmp = manhattan_distance(data[m], data[i])
                elif costF_idx == 2:
                    # pearson_distance
                    tmp = pearson_distance(data[m], data[i])
                elif costF_idx == 3:
                    # direct_distance
                    tmp = direct_distance(data[m], data[i], distDict)
                elif costF_idx == 4:
                    # similarity_distance
                    try:
                        tmp = similarity_distance(data[m], data[i], simDict)
                    except:
                        print m, i
                        print data[m]
                        print data[i]
                else:
                    print('Error: unknown cost function idx: %d' % (costF_idx))
            if cacheOn == True:
                # Save the distance for acceleration
                distances_cache[(m, i)] = tmp
            # Clustering
            if tmp < min_cost:
                choice = m
                min_cost = tmp
        # Done the clustering
        if min_cost == 0:
            # 0 similarity to all the medoids
            unassigned.append(i)
            # medoids['unassigned'].append(i)
        else:
            medoids[choice].append(i)
            total_cost += min_cost

    if acceleration == 2:
        transformed_medoids = {}  # dict(medoids)
        for i, m in enumerate(medoids.keys()):
            # print i, m
            transformed_medoids[str(i)] = {'med': m, 'nodes': medoids[m]}
            # transformed_medoids[i] = transformed_medoids.pop(m)
        return (total_cost, transformed_medoids)

    # Return the total cost and clustering
    return (total_cost, medoids)
def squeeze(self):
    # the idea is to pull every cell position to the center of the board
    def zigzag(width, height, c_index):
        # https://rosettacode.org/wiki/Zig-zag_matrix#Python
        # modified by Keyi
        corner_entries = [(0, 0), (width - 1, 0), (width - 1, height - 1),
                          (0, height - 1)]
        corner = corner_entries[c_index]
        index_order = sorted(
            ((x, y) for x in range(width) for y in range(height)),
            key=lambda p: (manhattan_distance(p, corner)))
        result = {}
        for n, index in enumerate(index_order):
            result[n] = index
        return result

    cluster_pos = self.state
    cluster_cells = {}
    used_special_blocks_pos = set()
    special_cells = {}

    # make each position set
    for cluster_id in cluster_pos:
        pos = cluster_pos[cluster_id]
        cluster_size = self.get_cluster_size(self.clusters[cluster_id])
        square_size = self.square_sizes[cluster_id]
        bbox = self.compute_bbox(pos, square_size)
        # find the four corners and compare which one is closer
        corners = [
            pos, [pos[0] + bbox[0], pos[1]],
            [pos[0] + bbox[0], pos[1] + bbox[1]], [pos[0], pos[1] + bbox[1]]
        ]
        dists = [
            manhattan_distance(p, self.center_of_board) for p in corners
        ]
        corner_index = np.argmin(dists)
        # we need to create a zig-zag index to maximize packing cells given
        # the bounding box
        matrix = zigzag(bbox[0], bbox[1], corner_index)
        # put into positions
        cells = set()
        count = 0
        search_count = 0
        while count < cluster_size:
            cell_pos = matrix[search_count]
            cell_pos = (pos[0] + cell_pos[0], pos[1] + cell_pos[1])
            if self.is_cell_legal(None, cell_pos, self.clb_type):
                cells.add(cell_pos)
                count += 1
            search_count += 1
        cluster_cells[cluster_id] = cells
        extra_cells = self.assign_special_blocks(
            self.clusters[cluster_id], cluster_pos[cluster_id], bbox,
            self.board_layout, used_special_blocks_pos)
        special_cells[cluster_id] = extra_cells

    # now the fun part, let's squeeze more!
    # algorithm:
    # in each iteration, each cluster selects the top N manhattan distance
    # cells and then moves them to its exterior.
    # this avoids a "mixture" boundary between two clusters
    # first step: remove overlaps
    # several tweaks:
    # because the middle ones have limited space, we de-overlap the middle
    # ones first
    cluster_ids = list(cluster_pos.keys())
    cluster_ids.sort(key=lambda cid: manhattan_distance(
        cluster_pos[cid], self.center_of_board))
    special_working_set = set()
    for cluster_id1 in cluster_ids:
        overlap_set = set()
        for cluster_id2 in cluster_cells:
            if cluster_id1 == cluster_id2:
                continue
            overlap = cluster_cells[cluster_id1].intersection(
                cluster_cells[cluster_id2])
            overlap_set = overlap_set.union(overlap)
        assert (len(cluster_cells[cluster_id1]) == self.get_cluster_size(
            self.clusters[cluster_id1]))
        # boolean board
        bboard = self.__get_bboard(cluster_cells, False)
        self.deoverlap(cluster_cells, cluster_id1, overlap_set)
        if overlap_set:
            print("Failed to de-overlap cluster ID:", cluster_id1,
                  "Heuristics will be used to put them together")
            special_working_set.add(cluster_id1)
            extra_cells = self.find_space(bboard, len(overlap_set))
            for cell in extra_cells:
                old_cell = overlap_set.pop()
                cluster_cells[cluster_id1].remove(old_cell)
                cluster_cells[cluster_id1].add(cell)
                assert (not bboard[cell[1]][cell[0]])
        assert (len(cluster_cells[cluster_id1]) == self.get_cluster_size(
            self.clusters[cluster_id1]))

    for i in self.clusters:
        assert (len(cluster_cells[i]) == self.get_cluster_size(
            self.clusters[i]))
    # check no overlap
    self.__get_bboard(cluster_cells)

    # squeeze them to the center
    for it in range(self.squeeze_iter):
        # print("iter:", it)
        for cluster_id in cluster_cells:
            self.squeeze_cluster(cluster_cells, cluster_id)

    for cluster_id in special_working_set:
        while True:
            num_moves = self.squeeze_cluster(cluster_cells, cluster_id)
            if num_moves <= 5:
                break

    # merge them into per blk_type
    result_cells = {}
    for cluster_id in cluster_cells:
        result_cells[cluster_id] = {"p": cluster_cells[cluster_id]}

    # add special cells to the final position
    for cluster_id in special_cells:
        result_cells[cluster_id].update(special_cells[cluster_id])

    # return centroids as well
    centroids = compute_centroids(result_cells)

    return result_cells, centroids
def min_manhattan_distance_heuristic_H(state, problem):
    left_H = util.state_elem_pos(state, 'H', problem.grid_size)
    dist_to_H = [util.manhattan_distance(state[0], xy) for xy in left_H]
    return min(dist_to_H) if len(dist_to_H) else 0