def move_coordinates():
    """Flask handler: apply a human move given as square indices, then answer with the new FEN.

    Reads the ``from``/``to`` square indices and the ``promotion`` flag from the
    query string, pushes the human move onto the board persisted in the ``Entry``
    row, asks the engine for its reply via ``computer_move()``, and returns the
    resulting board FEN.  Once the game is over (or no legal move was produced)
    it responds with ``"game over"`` instead.

    Returns:
        flask Response with the current FEN (status 200), or the literal body
        ``"game over"`` when the game has ended.
    """
    fen = Entry.query.first().board
    s = State()
    s.board = chess.Board(fen)
    if not s.board.is_game_over():
        # NOTE(review): int('') raises ValueError when a parameter is absent --
        # assumes the client always supplies 'from' and 'to'; confirm.
        source = int(request.args.get('from', default=''))
        target = int(request.args.get('to', default=''))
        promotion = request.args.get('promotion', default='') == 'true'
        move = s.board.san(
            chess.Move(source, target,
                       promotion=chess.QUEEN if promotion else None))
        # MONTE: keep the raw Move around for the Monte-Carlo agent's tree.
        move_uci = chess.Move(source, target,
                              promotion=chess.QUEEN if promotion else None)
        # board.san() returns a string; empty means no usable move.
        if move:
            print("human moves", move)
            try:
                s.board.push_san(move)
                Entry.query.update(dict(board=s.board.fen()))
                db.session.commit()
                if use_mc:
                    # MONTE: Note monte won't work on heroku bc it stores state;
                    ai_mc.push_move(move_uci)
                computer_move()
            except Exception:
                # Boundary handler: log the full traceback but keep serving.
                traceback.print_exc()
            # Re-read the board so the response reflects the engine's reply.
            fen = Entry.query.first().board
            s.board = chess.Board(fen)
            response = app.response_class(response=s.board.fen(), status=200)
            print(s.board)
            return response
    print("GAME IS OVER")
    response = app.response_class(response="game over", status=200)
    return response
def computer_move():
    """Compute the engine's reply on the persisted board and write it back.

    Loads the FEN from the ``Entry`` row, then either:
      * minimax path (``use_mc`` false): scores the candidate moves with
        ``ai.minimax`` and samples one in proportion to its score, or
      * MONTE path (``use_mc`` true): asks the Monte-Carlo agent for a move.

    Side effects:
        Updates the ``Entry`` row's ``board`` column with the new FEN and
        commits the DB session.
    """
    fen = Entry.query.first().board
    s = State()
    s.board = chess.Board(fen)
    if not use_mc:
        # MINIMAX: sample the reply weighted by each move's evaluated score.
        possible_moves = ai.minimax(s.board)
        moves = [m for m, _ in possible_moves]
        scores = np.asarray([score for _, score in possible_moves], dtype=float)
        # Normalize explicitly (np.random.choice requires p to sum to 1).
        probs = scores / scores.sum()
        aimove = np.random.choice(moves, p=probs)
        s.board.push(aimove)
    else:
        # MONTE: monte carlo agent; ``.a`` holds the chosen move's UCI string.
        aimove_mc, val, improved_policy = ai_mc.select_move(MC_SEARCH_ITER)
        s.board.push(chess.Move.from_uci(aimove_mc.a))
    Entry.query.update(dict(board=s.board.fen()))
    db.session.commit()