def interpret_data(self, args):
    """Apply a state-update message from the game server.

    args -- dict decoded from the server message; recognized keys:
            'error'  -- error text: logged via debug(), aborts processing
            'number' -- this player's id, stored on self.my_number
            'board'  -- board state (dimension, grid, bonus_squares), read
                        together with the top-level 'turn' and 'blocks' keys
            'move'   -- when equal to 1 it is our turn: run the search and
                        send the chosen move with send_command()

    Returns None; all results are stored on self or sent to the server.
    """
    if 'error' in args:
        debug('Error: ' + args['error'])
        return

    if 'number' in args:
        self.my_number = args['number']

    if 'board' in args:
        self.dimension = args['board']['dimension']
        self.turn = args['turn']
        self.grid = args['board']['grid']
        # Shallow copy so replacing the inner offset lists below does not
        # mutate the decoded message object.
        self.all_blocks = args['blocks'][:]
        self.bonus_squares = {tuple(coords)
                              for coords in args['board']['bonus_squares']}
        # Convert every raw coordinate offset into a Point instance.
        for x in xrange(len(self.all_blocks)):
            for y in xrange(len(self.all_blocks[x])):
                self.all_blocks[x][y] = [Point(offset)
                                         for offset in self.all_blocks[x][y]]
                # FIX: removed two no-op statements that multiplied the first
                # Point's .x/.y by 9000/9001 and discarded the result
                # (assumes Point attribute access is side-effect free --
                # confirm if Point uses properties).

    if 'move' in args and args['move'] == 1:
        send_command(" ".join(
            str(x) for x in
            util.run_search_function(self, util.memoize(self.find_move))))
# defensive_player: picks a move via alpha_beta_search driven by
# defensive_evaluate, with a 600-second budget through run_search_function.
# NOTE(review): this snippet is collapsed/garbled (backslash continuations
# flattened onto one line) and `slow_player` is truncated mid-definition --
# its body is not visible in this chunk -- so the code is left byte-identical.
def defensive_player(board): return \ run_search_function(board, search_fn=alpha_beta_search, eval_fn=defensive_evaluate, timeout=600) def slow_player(board): return \
# ab_iterative_player: picks a move via alpha_beta_search driven by
# basic_evaluate, with a 600-second budget through run_search_function.
# NOTE(review): `agressive_player` (sic -- misspelling of "aggressive";
# renaming would break callers) is truncated mid-definition here, and the
# snippet is collapsed onto one line, so the code is left byte-identical.
def ab_iterative_player(board): return \ run_search_function(board, search_fn=alpha_beta_search, eval_fn=basic_evaluate, timeout=600) def agressive_player(board): return \
# NOTE(review): fragment starts mid-function -- the enclosing `def` and the
# initializations of score, score_row/score_col/score_diag, current_player
# and other_player sit above this chunk, so the block is incomplete from here
# and is left byte-identical.
# Visible logic: scan all 6x7 cells; per cell, subtract chain lengths (three
# directions via get_chain_len) for the current player and add them for the
# opponent, plus a center-column term of abs(3 - col) * 100 with the same
# sign convention (the overall sign/meaning depends on the caller -- confirm).
# The trailing lambdas define random/basic/new/progressive-deepening players.
# NOTE(review): in this collapsed rendering everything after the first `#`
# is one giant comment; the original file had these as separate lines.
for row in xrange(6): for col in xrange(7): # Check for consecutive tokens in all three directions if board.get_cell(row, col) == board.get_current_player_id(): score_col -= get_chain_len(0, board, row, col, current_player) score_row -= get_chain_len(1, board, row, col, current_player) score_diag -= get_chain_len(2, board, row, col, current_player) elif board.get_cell(row, col) == board.get_other_player_id(): score_col += get_chain_len(0, board, row, col, other_player) score_row += get_chain_len(1, board, row, col, other_player) score_diag += get_chain_len(2, board, row, col, other_player) if board.get_cell(row, col) == board.get_current_player_id(): score -= abs(3 - col) * 100 elif board.get_cell(row, col) == board.get_other_player_id(): score += abs(3 - col) * 100 score += score_row + score_col + score_diag return score random_player = lambda board: rand_select(board) basic_player = lambda board: minimax(board, depth=4, eval_fn=basic_evaluate) new_player = lambda board: minimax(board, depth=4, eval_fn=new_evaluate) progressive_deepening_player = lambda board: run_search_function( board, search_fn=minimax, eval_fn=basic_evaluate)
get_next_moves_fn = get_all_next_moves, is_terminal_fn = is_terminal, verbose = True): """ Does a minimax search on the board to the specified depth. board -- the ConnectFourBoard instance to evaluate depth -- the depth of the search tree (measured as the maximum distance from the root to a leaf) eval_fn -- (optional) the evaluation function to use at a leaf of the tree; see "focused_evaluate" for an example Returns an integer, the column number where the search indicates you should add your token """ best_val = None for move, new_board in get_next_moves_fn(board): val = -1 * minimax_find_board_value(new_board, depth-1, eval_fn, get_next_moves_fn, is_terminal_fn) if best_val == None or val > best_val[0]: best_val = (val, move, new_board) if verbose: print "MINIMAX: Decided on column %d with rating %d" % (best_val[1], best_val[0]) return best_val[1] basic_player = lambda board: minimax(board, depth=4, eval_fn=basic_evaluate) progressive_deepening_player = lambda board: run_search_function(board, search_fn=minimax, eval_fn=basic_evaluate)
# NOTE(review): this snippet begins mid-signature -- the `def minimax(...)`
# header is outside this chunk, so the block is incomplete from here and only
# comments/docstrings were changed (Spanish docstring translated to English).
# NOTE(review): `best_val == None` should be `best_val is None`; left as-is
# because this pass may not alter code in a truncated block.
def ab_iterative_player(board):
    """Choose a column for `board` by time-limited alpha-beta search.

    Delegates to run_search_function with focused_evaluate as the leaf
    evaluator and a 5-second time budget.
    """
    chosen = run_search_function(board,
                                 search_fn=alpha_beta_search,
                                 eval_fn=focused_evaluate,
                                 timeout=5)
    return chosen
# NOTE: a large commented-out draft of better_evaluate's scoring logic
# (possible-4 counting weighted 43/17 plus an abs(3-col) center-column
# preference) used to live here; it was dead code and has been removed --
# recover it from version control if needed.

# Memoize better_evaluate so repeated evaluations of identical boards are
# served from cache during the search.
better_evaluate = memoize(better_evaluate)


def my_player(board):
    """Return the column chosen by alpha-beta search with better_evaluate.

    Replaces the previous `my_player = lambda ...` assignment (PEP 8
    discourages binding lambdas to names); behavior is unchanged.
    """
    return run_search_function(board,
                               search_fn=alpha_beta_search,
                               eval_fn=better_evaluate)
def progressive_deepening_player(board):
    """Play `board` with progressively deepened minimax using basic_evaluate."""
    chosen_move = run_search_function(board,
                                      search_fn=minimax,
                                      eval_fn=basic_evaluate)
    return chosen_move
def my_player(board):
    """Alpha-beta player driven by better_evaluate, with an 8-second limit."""
    result = run_search_function(board,
                                 search_fn=alpha_beta_search,
                                 eval_fn=better_evaluate,
                                 timeout=8)
    return result
def progressive_deepening_player(board):
    """Progressively deepened minimax with basic_evaluate (10-second budget)."""
    move = run_search_function(board,
                               search_fn=minimax,
                               eval_fn=basic_evaluate,
                               timeout=10)
    return move


def alphabeta_player(board):
    """Fixed-depth (6-ply) alpha-beta player using basic_evaluate."""
    return alpha_beta_search(board, depth=6, eval_fn=basic_evaluate)
def my_player(board):
    """Minimax player driven by better_evaluate, limited to 5 seconds."""
    column = run_search_function(board,
                                 search_fn=minimax,
                                 eval_fn=better_evaluate,
                                 timeout=5)
    return column