def play(board):
    """Run an interactive Connect-Four style game loop: the human and the
    AI (driven by `heuristic`) alternate moves until `board.check_game()`
    reports a finished game."""
    # '0' means the human moves first.
    is_player = input("먼저 두시겠습니까?\n 선공:0, 후공:1 을 입력해주세요.\n") == '0'
    print("선공입니다." if is_player else "후공입니다.")
    while True:
        board.print_board()
        if is_player:
            col = int(input("열을 입력해주세요(1~7)")) - 1
            if not board.place(col, is_player):
                # Illegal move: re-prompt without switching turns.
                print("잘못 두셨습니다. 다시 두세요.")
                continue
        else:
            print("계산중입니다...")
            board.place(heuristic(board, not is_player), is_player)
        if board.check_game():
            board.print_board()
            print("당신이 이겼습니다." if is_player else "인공지능이 이겼습니다.")
            break
        # Switch turns; after the first ply the opening-move flag is cleared.
        is_player = not is_player
        heu.first_turn = False
def calculate_state_heuristic(self, state):
    """Evaluate `state` against self.goal_list and store the results:
    state.distance gets the Euclidean-distance heuristic value and
    state.cost gets distance + depth (A*-style f = h + g)."""
    evaluator = heuristic(state, self.goal_list)
    evaluator.calculate_euclidian_distance()
    dist = evaluator.euclidian_distance
    state.distance = dist
    state.cost = dist + state.depth
def quiesce(fen, alpha, beta, flag, options):
    """Quiescence search: keep searching capture/check moves past the normal
    depth limit so the static heuristic is only applied to "quiet" positions.

    Parameters
    ----------
    fen : str
        Position to evaluate, as a FEN string.
    alpha, beta : number
        Fail-hard alpha-beta window (negamax convention).
    flag : threading.Event-like
        Search continues only while the flag is set.
    options :
        Carries `heuristic` ('NeuralNetwork' / 'Random' / default) and `model`.

    Returns
    -------
    (score, nodes) : score from the side-to-move's point of view, plus the
    number of positions evaluated in this subtree.
    """
    # Abort immediately when the controlling thread clears the stop flag.
    if not flag.is_set():
        return 0, 0
    # Static "stand pat" evaluation of the current position.
    if options.heuristic == 'NeuralNetwork':
        stand_pat = heuristic.nn_heuristic(fen, options, options.model)
    elif options.heuristic == 'Random':
        stand_pat = heuristic.random_heuristic()
    else:
        stand_pat = heuristic.heuristic(fen, options)
    nodes = 1
    # Fail-hard beta cutoff on the static score alone.
    if (stand_pat >= beta):
        return beta, nodes
    board = Board(fen)
    # Enable delta pruning only while enough material remains (> 8 pieces):
    # in near-endgames a large material deficit can still be recovered,
    # so pruning there would be unsound.
    if len(board.piece_map()) > 8:
        delta = True
    else:
        delta = False
    if delta:
        # Full delta pruning: static score is hopelessly below alpha.
        if (stand_pat < alpha - 1000):
            return alpha, nodes
    # Raise alpha to the static score (we may always "stand pat").
    if (stand_pat > alpha):
        alpha = stand_pat
    # Expansion and search: only captures and checking moves are explored.
    legal = board.legal_moves
    for move in legal:
        # Make the move on a copy so `board` stays at the root position.
        board.push(move)
        new = board.copy()
        board.pop()
        if board.is_capture(move) or new.is_check():
            # Per-move delta pruning: even winning this capture (plus a
            # 200 margin) cannot lift the score above alpha.
            if delta and board.is_capture(move):
                value = value_captured_piece(
                    board.piece_type_at(move.to_square)) + 200
                if (stand_pat + value < alpha):
                    continue
            # Negamax recursion with the window negated and swapped.
            score, count = quiesce(new.fen(), -beta, -alpha, flag, options)
            score = -score
            nodes += count
            if (score >= beta):
                return beta, nodes
            if (score > alpha):
                alpha = score
    return alpha, nodes
def __init__(self, initBoard, movement, previousScore, depth, isMax, turn, myTurn):
    """Build a game-tree node: copy the incoming board, apply `movement`,
    score the resulting position, and expand children while depth remains."""
    self.movement = movement
    self.isMax = isMax
    self.turn = turn
    self.myTurn = myTurn  # constant across the whole tree
    self.depth = depth
    self.previousScore = previousScore
    # Copy each row so this node never aliases the caller's board.
    self.initBoard = [list(row) for row in initBoard]
    self.finalBoard = getFinalBoard(self.initBoard, movement)
    self.currentScore = h.heuristic(
        previousScore, self.initBoard, self.finalBoard, isMax, turn, myTurn)
    self.children = []
    if depth > 0:
        self.giveBirth()
def console_solve(argv):
    """
    Run the solver in console form.

    Validates the single puzzle argument (digits 0-8, non-repeating),
    solves the puzzle while timing the run, prints every state of the
    solution path plus the total time, and returns a status string.

    Parameters
    ----------
    argv : list
        List of inputs given through the command prompt.

    Returns
    -------
    str
        String representing an error message or the completion message.
    """
    if len(argv) > 1:
        return TOO_MANY_INPUT_ERROR + f"{len(argv)} inputs."
    try:
        puzzle_str = [int(c) for c in argv[0]]
    except ValueError:
        # Robustness fix: non-digit characters used to raise an uncaught
        # ValueError; treat them as invalid input instead.
        print(INVALID_INPUT_ERROR)
        return INVALID_INPUT_ERROR
    if not Puzzle.validate(puzzle_str):
        # Bug fix: was `return print(INVALID_INPUT_ERROR)`, which returned
        # None instead of the error string promised by the docstring.
        print(INVALID_INPUT_ERROR)
        return INVALID_INPUT_ERROR
    puzzle = Puzzle(puzzle_str)
    start_time = perf_counter()
    puzzle = H.heuristic(puzzle)
    soln_set = puzzle.get_soln_states()
    end_time = perf_counter()
    for idx, state in enumerate(soln_set):
        # The first entry in the solution set is the starting configuration.
        # (Also fixes the "Initital" typo in the printed header.)
        print("Initial State:" if idx == 0 else f"State: {idx}")
        Puzzle.puzzle_printer(state)
        print()
    print(f"Total time: {round(end_time - start_time, 5)}s.")
    return COMPLETION
def main():
    """Entry point: configure a chess game (one or two AIs, optional FEN
    start position) and run it, printing the finished game as a PGN."""
    num_AI = int(input("Enter your desired number of AIs (1 or 2): "))
    # Bug fix: was `assert num_AI == 1 or num_AI == 2` — asserts are
    # stripped under `python -O` and are not meant for input validation.
    if num_AI != 1 and num_AI != 2:
        raise ValueError("Number of AIs must be 1 or 2, got %d" % num_AI)
    color = None
    if num_AI == 1:
        # WHITE=1, BLACK=-1; both are truthy, so `if color:` below still
        # routes a one-AI game to the human-vs-AI runner.
        color = int(input("Enter color for AI, WHITE=1 and BLACK=-1: "))
    new = input('Would you like to load from a FEN? (y/n): ')
    fen = chess.STARTING_FEN
    if new == 'y':
        fen = input('Enter your FEN: ')
    b = chess.Board(fen)
    h = heuristic.heuristic()
    if color:
        run_human_AI_game(b, h, color)
    else:
        run_AI_game(b, h)
    print(chess.pgn.Game().from_board(b))
print "> %s" % line words = line.split(" ") if words[0]=='PING' and not testing: s.send("PONG "+word[1]+"\r\n") if len(words)>2 and words[1]=='PRIVMSG': quote = line.split(":")[2] quote = quote.split("\r\n")[0] user = words[0].split(":")[1] user = user.split("!")[0] heur = heuristic.heuristic(ans[qnums[qid]]) full_ans = heuristic.plain_question(ans[qnums[qid]]) if good_enough(quote, full_ans) or (heur != "" and good_enough(quote, heur)): bot_say("%s got it right in %d seconds" % (user, time.time()-question_time)) if user not in scores: scores[user] = 0 scores[user] += 1 qid += 1 waiting = 2 timeout = time.time() + 5 if quote=="quizclown":
# Owner private messages are redirected into the channel handler by
# rewriting the message target, so the owner can control the bot by PM.
if len(words)>2 and words[1]=='PRIVMSG' and words[2]!=chan:
    if len(words[0].split(":")) > 1:
        user = words[0].split(":")[1]
        user = user.split("!")[0]
        if user == owner:
            words[2] = chan
if len(words)>2 and words[1]=='PRIVMSG' and words[2]==chan:
    # somebody said something in the channel
    quote = ":".join(line.split(":")[2:])
    quote = quote.split("\r\n")[0]
    # Sender nick is between the leading ':' and the '!'.
    user = words[0].split(":")[1]
    user = user.split("!")[0]
    # Heuristic (shortened) answer and the full plain answer for the
    # current question.
    heur = heuristic.heuristic(ans[qnums[question_number]])
    full_ans = heuristic.plain_question(ans[qnums[question_number]])
    # was it a correct answer?
    if good_enough(quote, full_ans) or (heur != "" and good_enough(quote, heur)):
        bot_say("%s got it right in %d seconds" % (user, time.time()-question_time))
        if user not in scores:
            scores[user] = 0
        scores[user] += 1
        question_number += 1
        state = PAUSE
        if lurkmode:
            # Lurk mode: wait a random 60-100 s before the next question.
            timeout = time.time() + random.randrange(60, 100)
# NOTE(review): this chunk is the body of a priority-queue search loop
# (Dijkstra/A*-style); the enclosing `while` is outside this view.
if currentNode == end:
    break
visited.append(currentNode)
cont += 1  # counts expanded (assessed) nodes
for i in range(4):
    # The assessed node: 4-connected neighbour of currentNode.
    x = currentNode[0] + auxX[i]
    y = currentNode[1] + auxY[i]
    # Skip already-visited and out-of-bounds neighbours.
    # NOTE(review): the y bound also uses `rows` — correct only if the
    # grid is square; verify whether it should be the column count.
    if (([x, y] not in visited) and (x >= 0 and x < rows) and (y >= 0 and y < rows)):
        # Cost g: currentNode's distance from the source plus the cost
        # of stepping onto the assessed node.
        g = currentG[currentNode[0]][currentNode[1]] + costs[adjList[x][y]]
        # Manhattan-style heuristic distance to the target node.
        h = hrc.heuristic([x, y], end, 50)
        # Estimated total cost to the target through this node.
        f = g + h
        # Relaxation: only re-queue if the new g is an improvement.
        if g < currentG[x][y]:
            currentG[x][y] = g
            pq.put((f, g, [x, y]))
            # Record the predecessor ("dad") for path reconstruction.
            dad[str([x, y])] = currentNode
print()
print("Cost of assessed nodes = " + str(cont))
print("Final Cost to achieve " + str(end) + " = " + str(currentG[end[0]][end[1]]))
coord = end
print("Path traveled:")
print(coord)
def test_heuristic_beside_bad(self):
    """Unsatisfied 'beside' goal costs two stack-placement penalties."""
    goals = [[('beside', 'a', 'b')]]
    stacks = [['a', 'e', 'f', 'g'], [], ['b', 'c', 'd']]
    expected = 2 * heuristic.PLACE_IN_STACK_PENALTY
    actual = heuristic.heuristic(goals, stacks, self.holding, self.arm,
                                 self.objects)
    self.assertEqual(actual, expected)
def test_heuristic_beside(self):
    """Satisfied 'beside' goal scores zero."""
    goals = [[('beside', 'a', 'b')]]
    stacks = [['a'], ['b', 'c', 'd']]
    actual = heuristic.heuristic(goals, stacks, self.holding, self.arm,
                                 self.objects)
    self.assertEqual(actual, 0)
def test_heuristic_zeroholding(self):
    """With nothing held, only the not-holding penalty applies."""
    stacks = [['c', 'f', 'a'], ['d', 'e', 'b']]
    actual = heuristic.heuristic(self.intprt, stacks, self.holding,
                                 self.arm, self.objects)
    self.assertEqual(actual, heuristic.NOT_HOLDING_PENALTY)
def test_heuristic_stackpenalty(self):
    """Fixture state incurs both the stack and not-holding penalties."""
    expected = heuristic.PLACE_IN_STACK_PENALTY + heuristic.NOT_HOLDING_PENALTY
    self.assertEqual(heuristic.heuristic(*self.state), expected)
def __init__(self, board):
    """Keep a reference to `board` and build its heuristic evaluator."""
    self._board = board
    self._heuristic = heuristic.heuristic(board)
def startTurn(turn, board):  # play one turn of the game (human or AI)
    # Module-level flag: tracks whether the human's first-move restriction
    # (centre column banned) still applies.
    global player_first
    try:
        player_input = 0  # initialise the player's input
        prev_input = [-1] * 2  # [row, column] of the move that gets played
        if turn == 1:  # human player's turn
            if player_first == 1:
                # First move: keep prompting until a valid column '1'-'7'.
                while player_input != '1' and player_input != '2' and player_input != '3' and player_input != '4' and player_input != '5' and player_input != '6' and player_input != '7':
                    player_input = input("플레이어 차례입니다. 두실 위치를 입력하세요 : ")  #
                    if player_input == '4':
                        # House rule: the centre column is banned on move one.
                        print("첫수는 가운데 입력이 불가능해요.")
                        player_input = 0
                        player_first = 1
                    else:
                        # Drop the stone in the lowest empty row; rows are
                        # reported to the DB layer as letters A-F.
                        for i in range(6):
                            if i == 0:
                                alphabet = 'A'
                            elif i == 1:
                                alphabet = 'B'
                            elif i == 2:
                                alphabet = 'C'
                            elif i == 3:
                                alphabet = 'D'
                            elif i == 4:
                                alphabet = 'E'
                            else:
                                alphabet = 'F'
                            if board[i][int(player_input) - 1] == 0:
                                board[i][int(player_input) - 1] = 1
                                prev_input[0] = i
                                prev_input[1] = int(player_input) - 1
                                player_first = 0
                                update.update_people_play(
                                    con, str(player_input) + alphabet)
                                return board, True, prev_input
                            else:
                                if i == 5:
                                    # Column already full: signal a failed move.
                                    print("해당컬럼은 꽉찼습니다. 다시 입력해주세요")
                                    return board, False, prev_input
            if player_first != 1:
                # Subsequent moves: same prompt loop, without the centre ban.
                while player_input != '1' and player_input != '2' and player_input != '3' and player_input != '4' and player_input != '5' and player_input != '6' and player_input != '7':
                    player_input = input("플레이어 차례입니다. 두실 위치를 입력하세요 : ")  #
                    if player_input != '1' and player_input != '2' and player_input != '3' and player_input != '4' and player_input != '5' and player_input != '6' and player_input != '7':
                        print("그곳은 입력이 불가능해요.")
                    else:
                        for i in range(6):
                            if i == 0:
                                alphabet = 'A'
                            elif i == 1:
                                alphabet = 'B'
                            elif i == 2:
                                alphabet = 'C'
                            elif i == 3:
                                alphabet = 'D'
                            elif i == 4:
                                alphabet = 'E'
                            else:
                                alphabet = 'F'
                            if board[i][int(player_input) - 1] == 0:
                                board[i][int(player_input) - 1] = 1
                                prev_input[0] = i
                                prev_input[1] = int(player_input) - 1
                                update.update_people_play(
                                    con, str(player_input) + alphabet)
                                return board, True, prev_input
                            else:
                                if i == 5:
                                    print(
                                        "Column " + player_input + " is already full. Please select another column."
                                    )
                                    player_first = 0
                                    return board, False, prev_input
        else:
            # AI turn.
            ai_input = ruleBase(board)  # try the rule base first
            player_first = 0
            if (ai_input == -1):
                # No rule matched: fall back to the heuristic to pick a column.
                ai_input = heuristic(con, board)
            for i in range(6):
                if board[i][ai_input] == 0:
                    board[i][ai_input] = -1
                    prev_input[0] = i
                    prev_input[1] = ai_input
                    if i == 0:  # row letter for logging
                        alphabet = 'A'
                    elif i == 1:
                        alphabet = 'B'
                    elif i == 2:
                        alphabet = 'C'
                    elif i == 3:
                        alphabet = 'D'
                    elif i == 4:
                        alphabet = 'E'
                    else:
                        alphabet = 'F'
                    print("( AI placed it on ", str(ai_input + 1) + alphabet, ")")
                    # Record the AI move / update the winning-line table.
                    update.update_ai_play(con, str(ai_input + 1) + alphabet)
                    return board, True, prev_input
                else:
                    if i == 5:
                        return board, False, prev_input
    except:
        # NOTE(review): bare except hides the real error; this branch only
        # dumps the WINNING_LINE rows for post-mortem debugging.
        print("ERROR 발생")
        print(board)
        with open('dump.sql', 'w') as f:
            f.write("**************************")
            for line in con.iterdump():
                if line.startswith('INSERT INTO "WINNING_LINE"'):
                    f.write('%s\n' % line)
            f.close  # NOTE(review): missing (); harmless since `with` closes f
# One interaction step of the training loop (enclosing loop is outside
# this view).
if is_render:
    env.render()
# Default action comes from the learned policy.
action = model.predict(state)
episode_rewards_sum = sum(epoche_rewards)
# Force action 0 once the episode is clearly failing or over its budget.
if episode_rewards_sum < min_reward:
    action = 0
# NOTE(review): time.clock() was removed in Python 3.8; this and the
# code that sets time_begin should both move to time.perf_counter().
if time.clock() - time_begin > time_limit:
    action = 0
if suc_count < (epochs_count - current_epoch) / 2:
    # Replace the neural network with the heuristic algorithm on low
    # vertical coordinate (assumes state[1] is altitude — TODO confirm),
    # half of the time at random.
    if state[1] < 0.5 and random.random() > 0.5:
        action = heuristic(env, state)
state_, reward, done, info = env.step(action)
# Record the transition (observation, reward, one-hot action) for the
# policy update.
epoche_observations.append(state)
epoche_rewards.append(reward)
action_onehot = np.zeros(env.action_space.n)
action_onehot[action] = 1
epoche_actions.append(action_onehot)
if done:
    episode_rewards_sum = sum(epoche_rewards)
    max_reward = max(episode_rewards_sum, max_reward)
# NOTE(review): the first line is the tail of a call whose start lies
# above this chunk (model restore with a weights path).
restore_path=weight_path)
# Play nine evaluation episodes, rendering and collecting frames.
for i in range(1, 10):
    total_reward = 0
    steps = 0
    s = env.reset()
    epoche_rewards = []
    # NOTE(review): time.clock() was removed in Python 3.8; use
    # time.perf_counter() instead.
    start = time.clock()
    print("iteration: ", i)
    while True:
        env.render()
        # Capture an RGB frame for an animation of the episode.
        frames.append(Image.fromarray(env.render(mode='rgb_array')))
        # Either pure heuristic control or the trained model.
        if is_heuristic:
            a = heuristic(env, s)
        else:
            a = model.predict(s)
        # replace neural network with heuristic algorithm on low vertical coordinate
        #if s[1] < 0.1:
        #    a = heuristic(env, s)
        state_, reward, done, info = env.step(a)
        epoche_rewards.append(reward)
        print("reward ", reward, "action ", a)
        episode_rewards_sum = sum(epoche_rewards)
        # Abort hopeless episodes early.
        if episode_rewards_sum < -200:
            done = True
def negamax(node, depth, alpha, beta, flag, options):
    """Fail-hard negamax alpha-beta search over a persistent game tree.

    Parameters
    ----------
    node : Node
        Tree node holding `position` (FEN), `previous` (piece+side keys of
        ancestor positions for repetition detection), and `next` children.
    depth : int
        Remaining plies; at 0 the position is evaluated (or quiesced).
    alpha, beta : number
        Search window (negamax convention).
    flag : threading.Event-like
        Search continues only while the flag is set.
    options :
        Carries `quiescence` and the `heuristic` selector / `model`.

    Returns
    -------
    (score, nodes, pv) : best score for the side to move, number of
    evaluated positions, and the principal variation as a list of UCI moves.
    """
    # Abort immediately when the controlling thread clears the stop flag.
    if not flag.is_set():
        return 0, 0, []
    # Leaf node: evaluate.
    if (depth == 0):
        # Repetition check: key is "placement + side to move" from the FEN.
        fen = node.position.split(' ')
        prev = ' '.join(fen[:2])
        if prev in node.previous:
            # Repeated position scores as a draw.
            return 0, 1, []
        # Evaluate via quiescence search or the configured static heuristic.
        if options.quiescence:
            ev, nodes = quiesce(node.position, alpha, beta, flag, options)
            return ev, nodes, []
        else:
            if options.heuristic == 'NeuralNetwork':
                ev = heuristic.nn_heuristic(node.position, options,
                                            options.model)
            elif options.heuristic == 'Random':
                ev = heuristic.random_heuristic()
            else:
                ev = heuristic.heuristic(node.position, options)
            node.eval = ev
            return ev, 1, []
    # Expansion: generate children once; later visits reuse node.next.
    if node.next == []:
        board = Board(node.position)
        legal = board.legal_moves
        fen = node.position.split(' ')
        prev = ' '.join(fen[:2])
        # Terminal position (no legal moves): evaluate it directly.
        if legal.count() == 0:
            # Repetition check (draw).
            if prev in node.previous:
                return 0, 1, []
            if options.heuristic == 'NeuralNetwork':
                ev = heuristic.nn_heuristic(node.position, options,
                                            options.model)
            elif options.heuristic == 'Random':
                ev = heuristic.random_heuristic()
            else:
                ev = heuristic.heuristic(node.position, options)
            node.eval = ev
            return ev, 1, []
        for move in legal:
            # Build the child from the position after `move`, then restore.
            board.push(move)
            new = Node(board.fen())
            new.move = move.uci()
            board.pop()
            # Child inherits the ancestor list plus this position's key.
            new.previous = node.previous.copy()
            new.previous.append(prev)
            node.next.append(new)
    # Search the children with a negated, swapped window.
    best_pv = []
    nodes = 0
    for new in node.next:
        score, count, pv = negamax(new, depth - 1, -beta, -alpha, flag,
                                   options)
        score = -score
        nodes += count
        # Prepend this move to the child's principal variation.
        pv.insert(0, new.move)
        if (score >= beta):
            return beta, nodes, []
        if (score > alpha):
            alpha = score
            best_pv = pv
    return alpha, nodes, best_pv