def main():
    """Play a game of Go on an NNGS server using Monte-Carlo search.

    Connects to NNGS, logs in as a fixed user, optionally issues a match
    request (``argv[1]`` = opponent, ``argv[2]`` = 'B' or 'W'), then loops:
    on our turn, asks the search for a move under a 1800-second total time
    budget and sends it; on the opponent's turn, reads their move and applies
    it to the local board state.  Runs until the server connection ends.
    """
    # TODO delete code from here
    go_state_obj = GoStateObject()
    from search import DeepLearningSearch
    from forward_prop_network import ForwardPropNetwork
    import tensorflow as tf
    import time
    # NOTE(review): sess appears unused by MontecarloSearch; kept because
    # creating a tf.Session has global side effects — confirm before removing.
    sess = tf.Session()
    search_algorithm = MontecarloSearch()
    args = sys.argv
    user = '******'

    # NNGS set up: connect, authenticate, optionally request a match.
    nngs = NNGS()
    nngs.connect_nngs()
    nngs.login(user)
    if len(args) == 3 and args[2] in ('B', 'W'):
        nngs.match(args[1], args[2])
    # Wait for the game to actually start before entering the move loop.
    nngs.nngs_wait()

    init_time = time.time()
    sum_time = 0  # total thinking time consumed so far (seconds)

    # Go rules and the two players, linked as a circular turn order.
    rule = Go()
    players = [
        Player(0, nngs.player_name['B']),
        Player(1, nngs.player_name['W'])
    ]
    players[0].next_player = players[1]
    players[1].next_player = players[0]
    player = players[0]

    last_opponent_move = None
    while True:
        print(rule.print_board(go_state_obj))
        if player.player_name == user:
            # Our turn: search returns the updated state and the chosen move.
            # Remaining time budget = 1800s minus time already spent.
            my_start_time = time.time()
            (go_state_obj, move) = search_algorithm.next_move(
                go_state_obj, player,
                1800 - (float(sum_time)), last_opponent_move)
            print("next_move:", move)
            nngs.send_data(move2nngs(move, rule))
            my_end_time = time.time()
            sum_time = sum_time + my_end_time - my_start_time
            print("Sum time" + str(sum_time))
        else:
            # Opponent's turn: block until the server reports their move.
            z = nngs.nngs_wait()
            if z is None:
                # Server closed / game over — nothing more to read.
                print("None! None! なん!")
                exit(0)
            if z < 0:
                # Negative codes are non-move events; keep waiting.
                continue
            nn_move = nngs2move(rule, player, z)
            print(nn_move)
            # Codes 0 and 3 both denote a pass — TODO confirm against nngs2move.
            if nn_move == 0 or nn_move == 3:
                last_opponent_move = rule._PASS
            else:
                last_opponent_move = nn_move
            go_state_obj = rule.move_and_return_state(
                go_state_obj, player, nn_move)
        player = player.next_player
    nngs.close_nngs()
def main():
    """Play a game of Go on an NNGS server using deep-learning search.

    Connects to NNGS, logs in as a fixed user, optionally issues a match
    request (``argv[1]`` = opponent, ``argv[2]`` = 'B' or 'W'), then loops:
    on our turn, asks the network-backed search for a move and sends it
    (forcing a pass once the game exceeds 500 turns); on the opponent's
    turn, reads their move and applies it to the local board state.
    """
    # TODO delete code from here
    go_state_obj = GoStateObject()
    from search import DeepLearningSearch
    from forward_prop_network import ForwardPropNetwork
    import tensorflow as tf
    search_algorithm = DeepLearningSearch()
    sess = tf.Session()
    forward_prop_network = ForwardPropNetwork(sess)
    args = sys.argv
    user = '******'

    # NNGS set up: connect, authenticate, optionally request a match.
    nngs = NNGS()
    nngs.connect_nngs()
    nngs.login(user)
    if len(args) == 3 and args[2] in ('B', 'W'):
        nngs.match(args[1], args[2])
    # Wait for the game to actually start before entering the move loop.
    nngs.nngs_wait()

    # Go rules and the two players, linked as a circular turn order.
    rule = Go()
    players = [
        Player(0, nngs.player_name['B']),
        Player(1, nngs.player_name['W'])
    ]
    players[0].next_player = players[1]
    players[1].next_player = players[0]
    player = players[0]

    last_opponent_move = None
    while True:
        print(rule.print_board(go_state_obj))
        if player.player_name == user:
            # Our turn: search returns the updated state and the chosen move.
            (go_state_obj, move) = search_algorithm.next_move(
                forward_prop_network, sess, go_state_obj, player,
                last_opponent_move)
            print("next_move:", move)
            if go_state_obj.turns_num >= 500:
                # Safety valve: force a pass in pathologically long games.
                print("over 10 PASS in nngs_client_pass.py")
                move = rule._PASS
            nngs.send_data(move2nngs(move, rule))
        else:
            # Opponent's turn: block until the server reports their move.
            z = nngs.nngs_wait()
            # nngs_wait() can return None when the connection/game ends
            # (see the Monte-Carlo client variant); comparing None < 0 would
            # raise TypeError on Python 3, so guard explicitly.
            if z is None:
                exit(0)
            if z < 0:
                # Negative codes are non-move events; keep waiting.
                continue
            nn_move = nngs2move(rule, player, z)
            print(nn_move)
            # Codes 0 and 3 both denote a pass — TODO confirm against nngs2move.
            if nn_move == 0 or nn_move == 3:
                last_opponent_move = rule._PASS
            else:
                last_opponent_move = nn_move
            go_state_obj = rule.move_and_return_state(
                go_state_obj, player, nn_move)
        player = player.next_player
    nngs.close_nngs()