def get_value_and_move(starting_matrix, max_samples=1000):
    if max_samples <= 1:
        return (my_score(starting_matrix), dir_down)
    merged_states = [(merge(starting_matrix, direction), direction)
                     for direction in all_directions]
    valid_states = [(mat, direction) for (mat, direction) in merged_states
                    if mat != starting_matrix]
    if not valid_states:
        # No move changes the board; fall back to the current score.
        return (my_score(starting_matrix), dir_down)
    # Spend the sample budget in proportion to how promising each state looks.
    weights = get_explore_weights([mat for mat, direction in valid_states])
    num_samples_per_state = distribute(max_samples, weights)
    rankables = []
    for (mat, direction), num_samples in zip(valid_states, num_samples_per_state):
        if not num_samples:
            continue
        # Pair each score with its direction as we go, so states that received
        # zero samples cannot shift scores onto the wrong direction.
        new_value = get_value_of_post_move(mat, num_samples)
        rankables.append((new_value, direction))
    return max(rankables)
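# get_explore_weights and distribute are used above but not shown here.
# A minimal sketch of distribute, assuming it splits the sample budget
# proportionally to the given weights (rounding down, with the remainder
# handed to the heaviest state). This is an assumption, not the original
# implementation.
def distribute(max_samples, weights):
    total = sum(weights)
    if not total:
        # Degenerate case: spread samples evenly.
        return [max_samples // len(weights)] * len(weights)
    counts = [int(max_samples * w / total) for w in weights]
    # Give any leftover samples to the most promising state.
    counts[weights.index(max(weights))] += max_samples - sum(counts)
    return counts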
def do_swipe(self, direction):
    new_mat = merge(self.mat, direction)
    if new_mat == self.mat:
        # No change - Don't add stuff to the board.
        return self.mat
    self.mat = new_mat
    self.add_to_random_state(direction)
    self.add_tile()
    return self.mat
def get_next_move(matrix):
    """Return one of the dir_ constants from game_logic"""
    for direction in prioritized_directions:
        if merge(matrix, direction) != matrix:
            return direction
    return dir_down  # no good options
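# prioritized_directions is assumed to be defined elsewhere. A plausible
# ordering for a corner-hugging strategy might look like this (hypothetical):
prioritized_directions = [dir_down, dir_left, dir_right, dir_up]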
def get_next_move(matrix):
    """Return one of the dir_ constants from game_logic"""
    def get_value(mat):
        # Tuples compare element by element, so the first item dominates.
        # Here we prefer matrices that changed, then higher-scoring matrices.
        return (mat != matrix, score(mat))

    best_value, best_direction = max(
        (get_value(merge(matrix, direction)), direction)
        for direction in all_directions)
    return best_direction
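# A hedged usage sketch: drive a game loop with get_next_move. The Board class
# and its game_over() method are hypothetical names, not part of the code above;
# do_swipe and score are the functions defined earlier.
def play_one_game():
    board = Board()
    while not board.game_over():
        direction = get_next_move(board.mat)
        board.do_swipe(direction)
    return score(board.mat)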
def test_merge_on_two_unsorted_not_empty_lists(self):
    list1 = [4, 5, 2, 1]
    list2 = [3, 6, 9, 1]
    self.assertEqual(merge(list1, list2), [1, 1, 2, 3, 4, 5, 6, 9])
def test_merge_on_empty_lists(self):
    list1 = []
    list2 = []
    self.assertEqual(merge(list1, list2), [])
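# The merge exercised by these tests takes two (possibly unsorted) lists and
# returns one sorted list; it is distinct from the board merge(matrix, direction)
# used earlier. A minimal sketch that satisfies both tests, assuming sorting the
# concatenation is acceptable:
def merge(list1, list2):
    return sorted(list1 + list2)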