import random

import numpy as np


def fight(ann, best, dump=False):
    # Play one game between the candidate network `ann` ('X') and the current
    # best network `best` ('O'); return the winning network, or None on a draw.
    board = game.getEmptyBoard()
    current = 'O'
    # Opening move: the board is empty, so any square is legal.
    action = random.randint(0, game.BOARD_SIZE * game.BOARD_SIZE - 1)
    board, won = game.move(board, action, current)
    if dump:
        print(game.render(board))
    current = 'O' if current == 'X' else 'X'
    while True:
        entry = state_lists_to_batch([board], current)
        probs = best(entry)[0] if current == 'O' else ann(entry)[0]
        # Mask out illegal actions until argmax lands on a legal one.
        while True:
            action = np.argmax(probs)
            if action not in game.possible_moves(board):
                probs[action] = -np.inf
            else:
                break
        board, won = game.move(board, action, current)
        if dump:
            print(game.render(board))
        # Check the win before the board-full draw, so a winning final move
        # is not misreported as a draw.
        if won:
            break
        if len(game.possible_moves(board)) == 0:
            return None
        current = 'O' if current == 'X' else 'X'
    return best if current == 'O' else ann
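# A minimal, self-contained sketch of the masking idiom used in fight() above:
# instead of re-running argmax in a loop, illegal actions can be masked once
# before a single argmax. `probs` and `legal` below are made-up example values.
import numpy as np

probs = np.array([0.1, 0.5, 0.2, 0.2])
legal = [0, 2, 3]                      # hypothetical set of legal action indices
masked = np.full_like(probs, -np.inf)
masked[legal] = probs[legal]           # keep scores only for legal actions
action = int(np.argmax(masked))        # best *legal* action (2 in this example)
assert action in legal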
def repl(game):
    print_game(game)
    user_in = input()
    tokens = user_in.split(' ')
    game.command = user_in
    if tokens[0] == "move":
        # An optional fourth token requests promotion for this move.
        if len(tokens) > 3 and tokens[3] == "promote":
            game.promote = True
        game.move(notation_to_coordinates(tokens[1]),
                  notation_to_coordinates(tokens[2]))
        game.promote = False
    elif tokens[0] == "drop":
        square, piece = notation_to_drop(tokens[1], tokens[2])
        game.drop(square, piece)
    lower_or_upper = "lower" if game.current_player == 1 else "UPPER"
    if game.command is not None:
        print(lower_or_upper + " player action: " + game.command)
    repl(game)
def move(self, game):
    board = game.moves[-1]
    if board.side == -1:
        board = board.invert_board()
    best_move = None
    best_score = 100.0
    moves = self.get_moves(board)
    to_eval = []
    for coords, new_board, new_score in moves:
        if new_score is None:
            # Collect every non-terminal opponent reply for batch evaluation.
            to_eval += [
                b.invert_board()
                for _, b, s in self.get_moves(new_board)
                if s is None
            ]
        else:
            # This is either a win or a forced draw.
            game.move(*coords)
            return
    # `equity` yields predictions in the same order the boards were queued
    # into to_eval above, so the two passes over `moves` must stay in step.
    equity = self.evaluate_boards(to_eval)
    for coords, new_board, new_score in moves:
        best_opponent_score = -100
        for _, _, s in self.get_moves(new_board):
            if s is None:
                one_ahead = next(equity)['predictions'][0] * -1
                if one_ahead > best_opponent_score:
                    best_opponent_score = one_ahead
            else:
                best_opponent_score = s * 100
                break
        if best_score < best_opponent_score or best_move is None:
            best_score = best_opponent_score
            best_move = coords
    game.move(*best_move)
    logging.info("Evaluation = {0}".format(best_score))
def move(state):
    if state.game.select in state.game.history \
            or game.winner(state.game) \
            or game.tie(state.game):
        return False
    elif state.game.select:
        select_loc(state)
        game.move(state.game)
    else:
        game.select(state.game)
def choose_move(state):
    # Try each of the four directions on a copy of the state and return the
    # direction whose expected tree value is highest.
    bestmove, bestmove_dir = None, None
    for direct in range(4):
        if not can_move(state, direct):
            continue
        s = state.copy()
        move(s, direct)
        fldsum = move_tree(make_prob_state(s), g_backtrack)
        if bestmove is None or fldsum > bestmove:
            bestmove, bestmove_dir = fldsum, direct
    return bestmove_dir
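# A toy, runnable illustration of the pattern in choose_move(): copy the state,
# apply each candidate move, score the result, and keep the best direction.
# The 4-element list state and scoring rule here are invented for the example.
def toy_choose(state):
    best_score, best_dir = None, None
    for direction in range(4):
        s = list(state)               # cheap copy of the toy state
        s[direction] += 1             # stand-in for move(s, direction)
        score = sum(s)                # stand-in for the tree evaluation
        if best_score is None or score > best_score:
            best_score, best_dir = score, direction
    return best_dir

assert toy_choose([0, 0, 0, 0]) == 0  # all moves tie; the first one wins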
def arg_body(self):
    user = self.user
    match = user.get_match()
    # Remove inline button
    bot.deleteMessage(user.chat_id, self.message_id)
    if self.text == "/cancel":
        user.send_message("Move cancelled.")
        user.send_photo(photo=match.get_board_img(user))
    elif self.text == "/accept":
        game.move(user, user.pending_arg)
def playerMove(self, plc):
    global cnt, nowply
    # Only accept the click if it is one of the legal moves.
    can = False
    for i in othello.canMove(disboard, ply):
        if plc == i:
            can = True
            break
    if can:
        othello.move(plc, disboard, ply)
        cnt += 1
        nowply = -1 * nowply
        self.nowTurn.setText(" 인공지능 턴, 생각중...")  # "AI's turn, thinking..."
        self.resetBoard(disboard, nowply, plc)
        self.aiMove()
from copy import deepcopy


def recurse(board, moves):
    # print(len(moves))
    if game.has_won(board):
        print(len(moves), " moves")
        print(moves)
        exit()
    elif len(moves) > 200:
        print("ERR?")
        print(moves)
        return
    if len(moves) == 52:
        print(".", end='')
    # Head all
    for from_stack in range(len(board.stacks)):
        f = deepcopy(board)
        if game.to_head(f, from_stack):
            new_moves = deepcopy(moves)
            new_moves.append(f"h {from_stack}")
            recurse(f, new_moves)
        for to_stack in range(len(board.stacks)):
            t = deepcopy(board)
            if game.move(t, from_stack, to_stack):
                new_moves = deepcopy(moves)
                new_moves.append(f"m {from_stack} {to_stack}")
                recurse(t, new_moves)
    s = deepcopy(board)
    if game.shift(s):
        new_moves = deepcopy(moves)
        new_moves.append("s")
        recurse(s, new_moves)
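# The core of recurse() above: deepcopy the board before every trial move so
# sibling branches never see each other's mutations. A runnable miniature with
# nested lists standing in for the board:
from copy import deepcopy

board = [[1, 2], [3]]
trial = deepcopy(board)
trial[0].append(9)                    # mutate only the copy
assert board == [[1, 2], [3]]         # the original branch state is intact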
from collections import deque


def solve(initial_position, pattern):
    initial_moves = []
    position_moves = [initial_position, initial_moves]
    queue = deque([position_moves])
    level = 0
    seen = set()
    while len(queue) > 0:
        print("starting level ", level, "No. of positions", len(queue))
        next_queue = deque([])
        for curr_position, prev_moves in queue:
            if game.is_goal(curr_position, pattern):
                return [curr_position, prev_moves]
            current_space_cell = None
            if len(prev_moves) > 0:
                current_space_cell, _ = prev_moves[-1]
            for move in game.legal_moves(curr_position, current_space_cell):
                # Never immediately undo the previous move.
                if len(prev_moves) > 0 and _is_reverse_move(move, prev_moves[-1]):
                    continue
                next_position = game.move(curr_position, move)
                nps = _position_as_string(next_position)
                if nps in seen:
                    continue
                moves = list(prev_moves)
                moves.append(move)
                next_queue.append([next_position, moves])
            seen.add(_position_as_string(curr_position))
        queue = next_queue
        level = level + 1
    return position_moves
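# A compact, runnable version of the level-by-level BFS in solve(): a queue of
# (state, moves) pairs plus a `seen` set of serialized states. The graph here
# is a made-up stand-in for game.legal_moves / game.move.
from collections import deque

def bfs(start, goal, neighbours):
    queue = deque([(start, [])])
    seen = {start}
    while queue:
        state, moves = queue.popleft()
        if state == goal:
            return moves
        for nxt in neighbours[state]:
            if nxt not in seen:
                seen.add(nxt)
                queue.append((nxt, moves + [nxt]))
    return None

graph = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
assert bfs('a', 'd', graph) == ['b', 'd']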
def actions(self, state):
    if self.is_game_over(state):
        return "STOP"
    la = np.array(['UP', 'DOWN', 'RIGHT', 'LEFT'])
    is_legals_actions = np.array(
        [is_in_grid(move(state[0], a), self._grid.shape) for a in la])
    return la[is_legals_actions].tolist()
def choose_min_max(m0, q0):
    dic = {}
    for d1 in (1, 2, 3, 4):
        if g.allow(m0, d1):
            dic[d1] = {'m': g.move(m0, d1)[0]}
            # Enumerate every empty cell and both possible spawn values.
            for d2 in range(4):
                for d3 in range(4):
                    if dic[d1]['m'][d2][d3] == 0:
                        dic[d1][str(d2) + str(d3) + '2'] = g.gen_xy2(dic[d1]['m'], (d2, d3))
                        dic[d1][str(d2) + str(d3) + '4'] = g.gen_xy4(dic[d1]['m'], (d2, d3))
            del dic[d1]['m']
    # Score every spawn outcome for directions that have any.
    for c1 in dic:
        if dic[c1]:
            for c2 in dic[c1]:
                dic[c1][c2] = t.test(dic[c1][c2])
    # Assume the worst spawn per direction, then pick the best direction.
    for c1 in dic:
        if dic[c1]:
            dic[c1] = dic[c1][min(dic[c1], key=dic[c1].get)]
        else:
            dic[c1] = float('-inf')
    d = max(dic, key=dic.get)
    return d
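# The nested dict reduction in choose_min_max(), shown on toy data: for each
# direction, assume the worst spawn (min over the inner dict's values), then
# pick the direction with the best worst case (max over the outer dict).
scores = {1: {'002': 5.0, '004': 2.0}, 2: {'112': 4.0}}
worst = {d: inner[min(inner, key=inner.get)] for d, inner in scores.items()}
assert worst == {1: 2.0, 2: 4.0}
assert max(worst, key=worst.get) == 2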
def game_action(client, event, channel, nick, rest):
    if not rest:
        yield "do what, %s?" % (nick)
    else:
        action = parse_action(rest.strip().lower())
        print(action)
        if action[0] == 'north':
            yield game.move(nick, game.NORTH)
        elif action[0] == 'south':
            yield game.move(nick, game.SOUTH)
        elif action[0] == 'east':
            yield game.move(nick, game.EAST)
        elif action[0] == 'west':
            yield game.move(nick, game.WEST)
        elif action[0] == 'take':
            yield "%s%s" % (nick, game.take(nick, action[1]), )
        elif action[0] == 'use':
            yield game.use(nick, action[1], action[2])
def step(self, action):
    # Execute one time step within the environment
    (self.arr, alive) = game.spawn(self.arr)
    (dim, dir) = (action // 2, action % 2)
    # print("Taking action {}.".format((dim, dir)))
    (self.arr, succ) = game.move(self.arr, dim, dir)
    # reward = 1 if succ else 0  # -np.sum(self.arr)
    # reward = 100 - np.sum(self.arr) + 5 * np.max(self.arr) if succ else 0
    reward = (float(self.arr.size - np.count_nonzero(self.arr))
              / self.arr.size)
    return self.arr, reward, not alive, {}
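# The reward in step() above is the fraction of empty cells; a quick numpy
# check of that formula on a hypothetical 4x4 board with 14 empty cells:
import numpy as np

arr = np.zeros((4, 4), dtype=int)
arr[0, 0], arr[0, 1] = 2, 4           # two occupied cells
reward = float(arr.size - np.count_nonzero(arr)) / arr.size
assert reward == 14 / 16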
def update(self):
    """ Updates ongoing move (if it exists). """
    if self.m:
        sx, sy, dx, dy = itertools.chain.from_iterable(self.m)
        start = self[sx][sy]
        dest = self[dx][dy]
        d = utils.tsub((dx, dy), (sx, sy))  # calculate direction
        if start.rect.topleft == dest.rect.topleft:  # reached destination
            self[sx][sy].rect = self.original
            self[dx][dy].image = self[sx][sy].image
            self[sx][sy].image = NULL_IMG
            self.m = []
            game.moving = False
            if game.current == game.player:
                game.move()
        else:  # keep moving
            start.rect.topleft = utils.tadd(start.rect.topleft, utils.tflip(d))
def run_game():
    not_quit = True
    p1_turn = False  # This will make sense below for who goes first
    p1_x = game.start_game()
    while not_quit:
        # Clear the display
        clear_display()
        # Initialize the game
        game_board = board.init_board()
        status = game.get_status(game_board, game.get_marker(p1_turn, p1_x))
        # Take turns until game is won or lost or quit
        while status == "ongoing":
            # Change whose turn it is
            p1_turn = not p1_turn
            # Set current marker
            marker = game.get_marker(p1_turn, p1_x)
            # Get choice positions and valid positions and move
            print("\n----------------")
            print("\nOpen Options Board:")
            move_choice = game.move(*board.get_open_pos(game_board), p1_turn)
            # Update the board
            game_board = board.update_game_board(game_board, move_choice, marker)
            # Clear the display
            clear_display()
            # Display the Board
            print("\nGame Board:")
            board.display_board(game_board)
            # Check if game is won or stalemate
            status = game.get_status(game_board, marker)
            if status != "ongoing":
                break
            t_sleep(1)
        # See if they want to play again
        not_quit = game.set_replay(status, p1_turn)
    print("\nSorry to see you go, but I'll enjoy watching you walk away :)")
def main():
    form = cgi.FieldStorage()
    try:
        user_data = data.read_users()
        user_info = user.get_session_from_cookie(user_data)
        selfplayer = user_info["username"]
        game_data = data.read_games()
        (p0, p1) = get_form_players(form)
        game_info = game.get_game_info(game_data, p0, p1)
        print_header()
        move = get_form_move(form)
        if move:
            try:
                game_data = game.move(game_data, game_info, selfplayer, move)
                data.write_games(game_data)
            except game.InvalidMove:
                print("<p>Invalid Move</p>")
                # print("<pre>")
                # print(game_data)
                # print("</pre>")
        game_over = game.is_game_over(game_info)
        if game_over[0]:
            print_game_over(game_info, game_over[1])
        else:
            print_game(game_info, selfplayer)
        print_footer()
    except user.NotLoggedIn:
        print("Content-Type: text/html\n\n")
        print("<html><body>")
        print("""<p>Not logged in. <a href="/cgi-bin/login.py">Log In</a> </p>""")
        print("</body></html>")
        # session not found, redirect back to login
        # print("Location: /cgi-bin/login.py\n\n")
    except (game.UnknownGame, MissingPlayers) as ex:
        print_header()
        print(ex)
        print("""<a href="/cgi-bin/newgame.py">New Game</a></p>""")
        print_footer()
def choose_max_depth(m0, q0, d0):
    dt = 0
    mt = c.deepcopy(m0)
    lst0 = [mt]
    while True:
        dt += 1
        lst1 = []  # reset per depth so earlier levels are not re-expanded
        for jt in lst0:
            for it in (1, 2, 3, 4):
                if dt == 1:
                    # At depth 1, remember which first move led here.
                    lst1.append((g.move(jt, it)[0], it))
                else:
                    lst1.append((g.move(jt[0], it)[0], jt[1]))
        lst0 = []
        for m1 in lst1:
            # Spawn a 2 or a 4 in every empty cell of the 4x4 board.
            for xt in range(4):
                for yt in range(4):
                    if m1[0][xt][yt] == 0:
                        lst0.append((g.gen_xy2(m1[0], (xt, yt)), m1[1]))
                        lst0.append((g.gen_xy4(m1[0], (xt, yt)), m1[1]))
        if dt == d0:
            break
    dr = []
    for mr in lst0:
        dr.append(t.test(mr[0], q0))
    s = dr.index(max(dr))
    return lst0[s][1]
def post(self, game_name):
    """ Looks like we got a Lando!! """
    player = self.get_player(game_name)
    if player:
        if game.move(self.db_conn, game_name, player, self.get_argument('move')):
            self.set_status(204)
        else:
            self.set_status(400, 'Could not submit move')
    else:
        self.set_status(400, 'You are not in this game')
    return self.render()
def step_cost(self, state, action):
    cost = 0
    pos = state[0]
    elems_state = state[1]
    nb_propellers = state[2]
    next_pos = move(pos, action)
    ind_next_e = self._e_ij(*next_pos)
    next_e = elems_state[ind_next_e]
    if next_e == "R":
        if nb_propellers > 1:
            cost += self._penalties["lose_propeller"]
        else:
            return 999999
    if (next_e == "L") and self._earth_mode:
        cost += self._penalties["mow_to_earth"]
    cost += self._penalties["living"]
    return cost
def result(self, state, action):
    pos = state[0]
    elems_state = list(state[1])
    nb_propellers = state[2]
    next_pos = move(pos, action)
    if is_in_grid(next_pos, self._grid.shape):
        pos = next_pos
        ind_next_e = self._e_ij(*next_pos)
        next_e = elems_state[ind_next_e]
        if next_e == "R":
            nb_propellers = max(nb_propellers - 1, 0)
        elems_state[ind_next_e] = elem_rules_trans(next_e, self._earth_mode)
    return (pos, "".join(elems_state), nb_propellers)
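# result() above returns a brand-new state tuple rather than mutating the old
# one; the element string is edited through a temporary list. The same idiom
# on a made-up state:
state = ((0, 0), "RLGG", 2)
elems = list(state[1])
elems[0] = "G"                        # e.g. a rock cell becomes grass
new_state = ((0, 1), "".join(elems), max(state[2] - 1, 0))
assert state[1] == "RLGG"             # the original state is untouched
assert new_state == ((0, 1), "GLGG", 1)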
def playAnEpisode(self, map1):
    # Exploring starts: pick a random non-terminal state and a random action.
    states = []
    for state in map1.all_states:
        if state.terminal == False:
            states.append(state)
    s = np.random.choice(states)
    a = np.random.choice(s.actions)
    map1.setState(s)
    states_actions_rewards = [(s.token, a, 0)]
    seen_states = set()
    seen_states.add(map1.currentState().token)
    num_steps = 0
    while True:
        r = move(map1, a)
        num_steps += 1
        s = map1.currentState()
        if s.token in seen_states:
            # Penalise revisiting a state and end the episode.
            reward = -10. / num_steps
            states_actions_rewards.append((s.token, None, reward))
            break
        elif gameOver(map1):
            states_actions_rewards.append((s.token, None, r))
            break
        else:
            a = self.policy[s.token]
            states_actions_rewards.append((s.token, a, r))
        seen_states.add(s.token)
    # Walk backwards to turn rewards into discounted returns (gamma = 0.9).
    G = 0
    states_actions_returns = []
    first = True
    for s, a, r in reversed(states_actions_rewards):
        if first:
            first = False
        else:
            states_actions_returns.append((s, a, G))
        G = r + (0.9 * G)
    states_actions_returns.reverse()
    return states_actions_returns
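# The return computation at the end of playAnEpisode() in isolation: walk the
# rewards backwards with G = r + gamma * G. The reward list is invented, and
# gamma = 0.5 is used here only so the expected values are exact floats (the
# snippet above uses 0.9).
def discounted_returns(rewards, gamma):
    G, out = 0.0, []
    for r in reversed(rewards):
        G = r + gamma * G
        out.append(G)
    out.reverse()
    return out

assert discounted_returns([0, 0, 1], gamma=0.5) == [0.25, 0.5, 1.0]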
from heapq import heappush, nsmallest


def solve_pq(initial_position, pattern):
    """Solve using priority queue """
    initial_moves = []
    if game.is_goal(initial_position, pattern):
        return [initial_position, initial_moves]
    position_moves = [0, 0, 0, initial_position, initial_moves]
    queue = [position_moves]
    level = 0
    seen = set()
    while len(queue) > 0:
        print("starting level ", level, "No. of positions", len(queue))
        next_queue = []
        for i, item in enumerate(queue):
            _, _, _, curr_position, prev_moves = item
            for j, move in enumerate(game.legal_moves(curr_position)):
                if len(prev_moves) > 0 and _is_reverse_move(move, prev_moves[-1]):
                    continue
                next_position = game.move(curr_position, move)
                nps = _position_as_string(next_position)
                if nps in seen:
                    continue
                moves = list(prev_moves)
                moves.append(move)
                if game.is_goal(next_position, pattern):
                    return [next_position, moves]
                # i and j act as tie-breakers so the heap never compares positions.
                priority = _score(next_position, pattern)
                heappush(next_queue, [priority, i, j, next_position, moves])
            seen.add(_position_as_string(curr_position))
        # Beam search: keep only the 10000 most promising positions.
        queue = nsmallest(10000, next_queue)
        level = level + 1
    return position_moves
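# The priority-queue pruning used by solve_pq(), reduced to its core: push
# (priority, ...) tuples with heappush, then keep only the k best candidates
# with nsmallest. The scores and payloads below are arbitrary examples.
from heapq import heappush, nsmallest

heap = []
for score, payload in [(3, 'c'), (1, 'a'), (2, 'b')]:
    heappush(heap, (score, payload))
beam = nsmallest(2, heap)             # prune to a beam of the 2 best
assert beam == [(1, 'a'), (2, 'b')]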
def choose_ex(m0, q0):
    dic = {}
    for d1 in (1, 2, 3, 4):
        if g.allow(m0, d1):
            dic[d1] = {'m': g.move(m0, d1)[0]}
            for d2 in range(4):
                for d3 in range(4):
                    if dic[d1]['m'][d2][d3] == 0:
                        dic[d1][str(d2) + str(d3) + '2'] = g.gen_xy2(dic[d1]['m'], (d2, d3))
                        dic[d1][str(d2) + str(d3) + '4'] = g.gen_xy4(dic[d1]['m'], (d2, d3))
            del dic[d1]['m']
    # Score each spawn outcome for directions that have any.
    for c1 in dic:
        if dic[c1]:
            for c2 in dic[c1]:
                dic[c1][c2] = t.test(dic[c1][c2])
    # Assume the worst spawn per direction.
    for c1 in dic:
        if dic[c1]:
            dic[c1] = dic[c1][min(dic[c1], key=dic[c1].get)]
        else:
            dic[c1] = float('-inf')
    # k counts the empty cells of the original board and acts as temperature.
    k = 0
    for i in m0:
        for j in i:
            if j == 0:
                k += 1
    dm = max(dic, key=dic.get)
    # Occasionally accept a worse direction, with probability
    # exp((score - best_score) / (k + 1)).
    for c1 in dic:
        if c1 != dm:
            p = r.random()
            if p < math.exp((dic[c1] - dic[dm]) / (k + 1)):
                return c1
    return dm
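# The acceptance rule at the end of choose_ex() in isolation: a worse move c1
# is taken with probability exp((score[c1] - score[best]) / (k + 1)), where k
# (the number of empty cells) acts as a temperature. Values are examples.
import math
import random

def accept_worse(score_c, score_best, k):
    return random.random() < math.exp((score_c - score_best) / (k + 1))

# With a large score gap and low temperature, the worse move is essentially
# never taken:
assert math.exp((-50.0 - 0.0) / (1 + 1)) < 1e-10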
def score(mt, qt):
    si = 0  # unused
    # Per-direction (up/down/left/right) score lists over every possible
    # spawn after moving one step; f ∈ (-inf, inf).
    ls = [[], [], [], []]
    # The spawn coordinate and value (x, y, 2 or 4) matching each score entry.
    dl2 = [[], [], [], []]
    for d1 in (1, 2, 3, 4):
        if g.allow(mt, d1) != 1:
            ls[d1 - 1] = [-float('inf')]
            dl2[d1 - 1] = []
            continue
        mp, si = g.move(mt, d1, si)
        dl1 = []
        for x in (0, 1, 2, 3):
            for y in (0, 1, 2, 3):
                if mp[x][y] == 0:
                    dl1.append((x, y))
        # Both spawn values are scored with the same evaluator arguments.
        for d2 in dl1:
            md = g.gen_xy2(mp, d2)
            ls[d1 - 1].append(t.test(md, qt))
            dl2[d1 - 1].append((d2[0], d2[1], 2))
            md = g.gen_xy4(mp, d2)
            ls[d1 - 1].append(t.test(md, qt))
            dl2[d1 - 1].append((d2[0], d2[1], 4))
    '''
    for line in ls:
        print(line)
    for line in dl2:
        print(line)
    '''
    return ls, dl2
                self.Q[s.token][a] = 0
                self.returns[(s.token, a)] = []
            else:
                pass

        # Monte Carlo control: 2000 episodes of evaluation + improvement.
        for T in range(2000):
            self.states_actions_returns = self.playAnEpisode(map1)
            seen_s_a_pairs = set()
            # First-visit update: use each (s, a) pair once per episode.
            for s, a, G in self.states_actions_returns:
                if (s, a) not in seen_s_a_pairs:
                    old_q = self.Q[s][a]
                    self.returns[(s, a)].append(G)
                    self.Q[s][a] = max(self.returns[(s, a)])
                    seen_s_a_pairs.add((s, a))
            # Greedy policy improvement over the updated Q.
            for sa in states:
                self.policy[sa.token] = max_dict(self.Q[sa.token])[0]


agent = Agent()
agent.generateOptimalPolicy(map1)
map1 = standardMap()
map1.setState(map1.all_states[13])
map1.displayMap()
while True:
    mv = agent.policy[map1.currentState().token]
    move(map1, mv)
    map1.displayMap()
    sleep(0.3)
    if gameOver(map1):
        break
map1.displayMap()
def aiMove(self):
    global cnt, disboard, nowply, ply, isGameing
    if len(othello.canMove(disboard, 1)) == 0 and len(othello.canMove(disboard, -1)) == 0:
        # Neither side can move: count discs and announce the result.
        blacknum = 0
        whitlenum = 0
        for i in range(8):
            for j in range(8):
                if disboard[i][j] == 1:
                    blacknum += 1
                elif disboard[i][j] == -1:
                    whitlenum += 1
        self.nowTurn.setText(" 경기 완료")  # "Game finished"
        if ply == 1:
            if blacknum > whitlenum:
                self.winner.setText(" 흑, 플레이어가 이겼습니다.")  # "Black (the player) wins."
            elif blacknum < whitlenum:
                self.winner.setText(" 백, AI가 이겼습니다.")  # "White (the AI) wins."
            else:
                self.winner.setText(" 무승부 입니다.")  # "It's a draw."
        elif ply == -1:
            if blacknum > whitlenum:
                self.winner.setText(" 흑, AI가 이겼습니다.")  # "Black (the AI) wins."
            elif blacknum < whitlenum:
                self.winner.setText(" 백, 플레이어가 이겼습니다.")  # "White (the player) wins."
            else:
                self.winner.setText(" 무승부 입니다.")  # "It's a draw."
        isGameing = False
    else:
        if cnt == 0:
            # Fixed opening move.
            cnt += 1
            othello.move([5, 4], disboard, nowply)
            nowply = -1 * nowply
            self.resetBoard(disboard, nowply, [5, 4])
            self.playerMovePre()
        # elif cnt == 1:
        #     cnt += 1
        #
        #     if disboard[4][5] == -1 * nowply or disboard[5][4] == -1 * nowply:
        #         othello.move([5, 5], disboard, nowply)
        #         nowply = -1 * nowply
        #         self.resetBoard(disboard, nowply, [5, 5])
        #     elif disboard[2][3] == -1 * nowply or disboard[3][2] == -1 * nowply:
        #         othello.move([2, 2], disboard, nowply)
        #         nowply = -1 * nowply
        #         self.resetBoard(disboard, nowply, [2, 2])
        #
        #     self.playerMovePre()
        else:
            if len(othello.canMove(disboard, nowply)) == 0:
                # The AI must pass.
                self.showMessageBox()
                nowply = -1 * nowply
                self.resetBoard(disboard, nowply)
                self.playerMovePre()
            else:
                # The search always plays colour 1, so invert the board first
                # when it is white's turn.
                disboardTmp = c.deepcopy(disboard)
                if nowply == -1:
                    for a in range(8):
                        for b in range(8):
                            disboardTmp[a][b] = -1 * disboardTmp[a][b]
                mcts, canMov = self.searchValueDown(disboardTmp, 1, 1, cnt, searchDeep)
                # Pick the move(s) with the best win rate, breaking ties randomly.
                rsWR = mcts[0][0] / mcts[0][1]
                for i in range(1, len(canMov)):
                    if rsWR < mcts[i][0] / mcts[i][1]:
                        rsWR = mcts[i][0] / mcts[i][1]
                sameM = list()
                sameNum = list()
                for i in range(len(canMov)):
                    if rsWR == mcts[i][0] / mcts[i][1]:
                        sameM.append(mcts[i])
                        sameNum.append(i)
                sameRandom = random.randrange(0, len(sameM))
                self.aiWinRate.setText(
                    " 인공지능 예상 승률: "  # "AI predicted win rate: "
                    + str("{0:.2f}".format(mcts[sameNum[sameRandom]][0] / mcts[sameNum[sameRandom]][1] * 100))
                    + "%")
                cnt += 1
                othello.move(canMov[sameNum[sameRandom]], disboard, nowply)
                nowply = -1 * nowply
                self.resetBoard(disboard, nowply, canMov[sameNum[sameRandom]])
                self.playerMovePre()
def searchValueDown(self, nowBoard, color, mycolor, cnt, repeat):
    # Win/visit pairs to return.
    tmp = list()
    # Board converted into the two input planes the policy network expects.
    tmp1 = list()
    for i in range(2):
        tmp1.append([[0] * 8 for _ in range(8)])
    # One plane per colour.
    for i in range(8):
        for j in range(8):
            if nowBoard[i][j] == 1:
                tmp1[0][i][j] = 1
            elif nowBoard[i][j] == -1:
                tmp1[1][i][j] = 1
    # Run the policy network.
    aiResult = model.predict(np.array([tmp1]))
    # Legal moves for the side to play.
    canMove = othello.canMove(nowBoard, color)
    if len(canMove) == 0:
        # No legal move: pass to the opponent and aggregate its statistics.
        Wi = 0
        Ni = 0
        downBoard = c.deepcopy(nowBoard)
        downWN, _ = self.searchValueDown(downBoard, -1 * color, mycolor, cnt + 1, repeat)
        for j in downWN:
            Wi += j[0]
            Ni += j[1]
        tmp.append([Wi, Ni])
        return tmp, []
    else:
        # Policy-network probability for each legal move.
        canMovA = list()
        for a in canMove:
            canMovA.append(aiResult[0][a[0] + a[1] * 8])
        # Moves whose probability falls inside the search window.
        canMovF = list()
        for i in range(len(canMovA)):
            if SearchRange > abs(1 - canMovA[i]):
                canMovF.append(canMove[i])
        # If nothing qualified, widen the window (semimode) or take all moves.
        if len(canMovF) == 0:
            if semimode:
                rsD = 3
                for i in range(len(canMovA)):
                    if rsD > abs(1 - canMovA[i]):
                        rsD = abs(1 - canMovA[i])
                for i in range(len(canMovA)):
                    if rsD + SemiRange >= abs(1 - canMovA[i]):
                        canMovF.append(canMove[i])
            else:
                for z in range(len(canMovA)):
                    canMovF.append(canMove[z])
        # A single candidate that can be one-turn-killed is dangerous, so in
        # that case fall back to searching every legal move.
        if len(canMovF) == 1:
            downBoard = c.deepcopy(nowBoard)
            othello.move(canMovF[0], downBoard, color)
            onekill = False
            nextCanMove = othello.canMove(downBoard, -1 * color)
            for y in nextCanMove:
                dnextBoard = c.deepcopy(downBoard)
                othello.move(y, dnextBoard, -1 * color)
                if len(othello.canMove(dnextBoard, -1 * color)) == 0 and len(
                        othello.canMove(dnextBoard, color)) == 0:
                    my = 0
                    en = 0
                    for j in downBoard:
                        for k in j:
                            if k == mycolor:
                                my += 1
                            elif k == -1 * mycolor:
                                en += 1
                    if my == 0:
                        onekill = True
                        break
            if onekill:
                canMovF = list()
                for z in range(len(canMovA)):
                    canMovF.append(canMove[z])
        # Play each selected move.
        for i in canMovF:
            downBoard = c.deepcopy(nowBoard)
            othello.move(i, downBoard, color)
            if repeat == 1:
                # At the depth limit, score the position with the value net.
                aiInput = c.deepcopy(tmp1)
                aitmp = [[0] * 8 for _ in range(8)]
                aitmp[i[1]][i[0]] = 1
                aiInput.append(aitmp)
                aiResultValue = modelValue.predict(np.array([aiInput]))
                tmp.append([aiResultValue[0][0], 1])
            elif len(othello.canMove(downBoard, -1 * color)) == 0 and len(othello.canMove(downBoard, color)) == 0:
                # Terminal position: count discs and record win/draw/loss.
                my = 0
                en = 0
                for j in downBoard:
                    for k in j:
                        if k == mycolor:
                            my += 1
                        elif k == -1 * mycolor:
                            en += 1
                if my > en:
                    tmp.append([1, 1])
                elif my < en:
                    tmp.append([0, 1])
                else:
                    tmp.append([0.5, 1])
            else:
                # Check for a one-turn kill, which forces a [0, 1] result.
                onekill = False
                nextCanMove = othello.canMove(downBoard, -1 * color)
                for y in nextCanMove:
                    dnextBoard = c.deepcopy(downBoard)
                    othello.move(y, dnextBoard, -1 * color)
                    if len(othello.canMove(dnextBoard, -1 * color)) == 0 and len(
                            othello.canMove(dnextBoard, color)) == 0:
                        my = 0
                        en = 0
                        for j in downBoard:
                            for k in j:
                                if k == mycolor:
                                    my += 1
                                elif k == -1 * mycolor:
                                    en += 1
                        if my == 0:
                            onekill = True
                            break
                if onekill:
                    tmp.append([0, 1])
                else:
                    # Recurse one ply deeper and aggregate the statistics.
                    Wi = 0
                    Ni = 0
                    downWN, _ = self.searchValueDown(downBoard, -1 * color, mycolor, cnt + 1, repeat - 1)
                    for j in downWN:
                        Wi += j[0]
                        Ni += j[1]
                    tmp.append([Wi, Ni])
        return tmp, canMovF
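# The statistic propagated upwards by searchValueDown(): each child returns a
# [wins, visits] pair, and the parent's value is the summed ratio. A tiny
# check with invented child statistics:
children = [[3, 4], [1, 2]]           # e.g. 3/4 and 1/2 win rates
Wi = sum(w for w, _ in children)
Ni = sum(n for _, n in children)
assert (Wi, Ni) == (4, 6) and Wi / Ni == 4 / 6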
from game import move
from map import rooms

# Moving south from Reception should lead to the Admins room.
assert move(rooms["Reception"]["exits"], "south") == rooms["Admins"]
            keyboard=keyboards.gameKeyboard(
                event.user_id).get_keyboard(),
            message='Ты в игре!')  # "You're in the game!"
    else:
        hard = 1 / 0  # deliberately raise so the except branch reports bad input
except:
    Lsvk.messages.send(
        user_id=event.user_id,
        random_id=get_random_id(),
        message='Некоректный ввод, попробуй еще раз')  # "Invalid input, try again"

if userStatus[1] == '2':  # the user is currently in the game
    # Back / Left / Right / Forward; game.move returns True once the maze is exited.
    var = ['Назад', 'Налево', 'Направо', 'Вперед']
    if event.text in var:
        if game.move(event.user_id, var.index(event.text)):
            data.setUserStatus(event.user_id, 'G0')
            Lsvk.messages.send(
                user_id=event.user_id,
                random_id=get_random_id(),
                keyboard=keyboards.gameMenu(
                    event.user_id).get_keyboard(),
                message='Поздравляем, ты вышел из лабиринта!')  # "Congratulations, you escaped the maze!"
        else:
            # Still inside after the move: look around; returns a list of markers.
            i = game.check(event.user_id)
            if len(i) == 2:
                Lsvk.messages.send(
                    user_id=event.user_id,
                    random_id=get_random_id(),
from abs_se import self_abs

# print(hex(int('34')))
print(self_abs(-9))
# print(self_abs('d'))
print(type('23'))

# Returning multiple values
from game import move
import math

print(move(4, 5, 2, 30))
r = move(100, 100, 60, math.pi / 6)
print(r)

from quadratic import quadratic

print(quadratic(1, -2, 1))
# Tests:
print('quadratic(2, 3, 1) =', quadratic(2, 3, 1))
print('quadratic(1, 3, -4) =', quadratic(1, 3, -4))
if quadratic(2, 3, 1) != (-0.5, -1.0):
    print('test failed')
elif quadratic(1, 3, -4) != (1.0, -4.0):
    print('test failed')
else:
    print('test passed')
def searchDown(nowBoard, color, mycolor, cnt):
    # Win/visit pairs to return, plus the two policy-network input planes.
    tmp = list()
    tmp1 = list()
    for i in range(2):
        tmp1.append([[0] * 8 for _ in range(8)])
    for i in range(8):
        for j in range(8):
            if nowBoard[i][j] == 1:
                tmp1[0][i][j] = 1
            elif nowBoard[i][j] == -1:
                tmp1[1][i][j] = 1
    aiResult = model.predict(np.array([tmp1]))
    canMove = othello.canMove(nowBoard, color)
    if len(canMove) == 0:
        # No legal move: pass and aggregate the opponent's statistics.
        Wi = 0
        Ni = 0
        downBoard = c.deepcopy(nowBoard)
        downWN, _ = searchDown(downBoard, -1 * color, mycolor, cnt + 1)
        for j in downWN:
            Wi += j[0]
            Ni += j[1]
        tmp.append([Wi, Ni])
        return tmp, []
    else:
        canMovA = list()
        for a in canMove:
            canMovA.append(aiResult[0][a[0] + a[1] * 8])
        # Keep moves whose policy probability is within 0.71 of certainty.
        canMovF = list()
        for i in range(len(canMovA)):
            if 0.71 > abs(1 - canMovA[i]):
                canMovF.append(canMove[i])
        if len(canMovF) == 0:
            # Nothing qualified: keep the move(s) closest to certainty
            # (the tiny epsilon tolerates floating-point ties).
            rsD = 3
            for i in range(len(canMovA)):
                if rsD > abs(1 - canMovA[i]):
                    rsD = abs(1 - canMovA[i])
            for i in range(len(canMovA)):
                if rsD + 0.0000000001 > abs(1 - canMovA[i]):
                    canMovF.append(canMove[i])
        for i in canMovF:
            downBoard = c.deepcopy(nowBoard)
            othello.move(i, downBoard, color)
            if len(othello.canMove(downBoard, -1 * color)) == 0 and len(othello.canMove(downBoard, color)) == 0:
                # Terminal: count discs and record win/draw/loss.
                my = 0
                en = 0
                for j in downBoard:
                    for k in j:
                        if k == mycolor:
                            my += 1
                        elif k == -1 * mycolor:
                            en += 1
                if my > en:
                    tmp.append([1, 1])
                elif my < en:
                    tmp.append([0, 1])
                else:
                    tmp.append([0.5, 1])
            else:
                Wi = 0
                Ni = 0
                downWN, _ = searchDown(downBoard, -1 * color, mycolor, cnt + 1)
                for j in downWN:
                    Wi += j[0]
                    Ni += j[1]
                tmp.append([Wi, Ni])
        return tmp, canMovF
import random


def random_move(self):
    # Choose one of the currently available moves at random and play it.
    available_moves = get_available_moves()
    row, col = random.choice(available_moves)
    game.move(row, col)