Code Example #1
File: client.py  Project: anttisalonen/spaeher
 def __init__(self, name, server, configurator):
     self.connected = True
     self.ownedTeamIDs = []
     self.gameState = game.GameState()
     self.name = name
     self.server = server
     self.isConfigurator = configurator
     self.ownTeamID = 0 if configurator else 1
     self.clientid = self.ownTeamID
     self.ai = ai.AI(self.ownTeamID)
Code Example #2
File: main.py  Project: rtaneja1997/TicTacToe
def run_game(): 
	print (welcome_message) 
	print ("") 
	print (instructions)
	choice=input("Pick H or T\n")
	while choice.upper() not in ['H', 'T']:
		choice=input("Invalid input. Please pick H or T\n")

	#flip a coin 
	i=random.randint(0,1) 
	winner='HT'[i] 
	if winner==choice:
		turn='player'
		print ("Congratulations! You go first :)")
	else:
		turn='ai' 
		print ("Sorry, you go second :(")
	
	#game starts 
	game_ongoing=True 
	game_state = game.GameState(turn) #create a new game 

	while game_ongoing:


		if game_state.getTurn()=='ai': #AI's Turn 
			ai_status=ai.play(game_state)
			if ai_status==game.PLAYER_LOST: 
				print ("AI WON!")
				game_ongoing=False 
			if ai_status==game.TIE:
				game_ongoing=False
				print ("TIE!") 

		else: #Player Turn  
			game_state.printBoard() #display board for player 

			move=input("Please pick a tile\n") 
			status=game.update(game_state, move) 

			while status==game.NOT_VALID_MOVE: 
				move=input("Invalid tile. Please pick a tile that isn't occupied\n") 
				status=game.update(game_state, move) 

			#picked a valid tile, game updated accordingly 
			if status==game.PLAYER_WON:
				print ("CONGRATS! YOU WON!")
				game_ongoing=False 

			if status==game.TIE:
			game_ongoing=False 
				print ("TIE!")

			
	game_state.printBoard() #display board for player 
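
The snippet never shows the `game` and `ai` modules it drives. A minimal sketch of the `game` interface it assumes, with the names taken from the calls above and everything else invented as placeholders:

# Hypothetical stub of the `game` module assumed by run_game(); only the
# names come from the snippet, the bodies and constant values are made up.
PLAYER_WON = 'player_won'
PLAYER_LOST = 'player_lost'
TIE = 'tie'
NOT_VALID_MOVE = 'not_valid_move'

class GameState:
    def __init__(self, turn):
        self.turn = turn              # 'player' or 'ai'
        self.board = [' '] * 9

    def getTurn(self):
        return self.turn

    def printBoard(self):
        ...

def update(game_state, move):
    """Apply the player's move and return one of the status constants."""
    ...
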
Code Example #3
def play_run(balance, iterations, strategy):

	gameState = game.GameState(balance, strategy)

	for i in range(iterations):
		try:
			gameState.nextStep()
		except game.NotEnoughMoney:
			# Busted: the bankroll ran out before the run completed
			return 0

	return gameState.getBalance()
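
Since play_run() returns 0 on a bust and the final balance otherwise, a caller can average it over repeated runs to compare strategies. A hedged usage sketch (the strategy object and run counts are assumptions, not part of the original project):

# Hypothetical driver for play_run(); my_strategy is a placeholder.
runs = 1000
results = [play_run(balance=100, iterations=500, strategy=my_strategy)
           for _ in range(runs)]
bust_rate = sum(1 for r in results if r == 0) / runs
print("mean final balance: %.2f, bust rate: %.1f%%"
      % (sum(results) / runs, 100 * bust_rate))
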
Code Example #4
    def _reset(self):
        self._agent_player = random.randint(0, 1)
        player_controllers = [self._bot]
        player_controllers.insert(self._agent_player,
                                  self._action_player_controller)
        self._game_state = game.GameState(player_controllers, self._drawless)
        game_over = -1
        # Let the bot act until it is the agent's turn (or the game ends)
        while self._game_state.player_to_move != self._agent_player and game_over == -1:
            game_over = self._game_state.step()
        assert game_over == -1  # the game must not end before the agent moves

        next_obs = self._get_observation()
        return ts.transition(next_obs, reward=0.0)
Code Example #5
def test_game_state_details():
    game_state = game.GameState(GAME_STATE_JSON)
    assert game_state.tournament_id == GAME_STATE_JSON['tournament_id']
    assert game_state.game_id == GAME_STATE_JSON['game_id']
    assert game_state.round == GAME_STATE_JSON['round']
    assert game_state.pot == GAME_STATE_JSON['pot']
    assert game_state.orbits == GAME_STATE_JSON['orbits']
    assert game_state.dealer == GAME_STATE_JSON['dealer']
    assert game_state.small_blind == GAME_STATE_JSON['small_blind']
    assert game_state.big_blind == GAME_STATE_JSON['small_blind'] * 2
    assert game_state.minimum_raise == GAME_STATE_JSON['minimum_raise']
    assert game_state.in_action == GAME_STATE_JSON['in_action']
    assert game_state.own_player.id == 1
    assert game_state.own_player.name == "Game On"
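
The GAME_STATE_JSON fixture is defined elsewhere in the test module. Judging from the keys the assertions read, a minimal fixture might look like this (all values are invented for illustration; only the key names are grounded in the test):

# Hypothetical fixture mirroring the assertions above.
GAME_STATE_JSON = {
    'tournament_id': 't-1234',
    'game_id': 'g-5678',
    'round': 0,
    'pot': 40,
    'orbits': 7,
    'dealer': 1,
    'small_blind': 10,      # big_blind is asserted to equal small_blind * 2
    'minimum_raise': 20,
    'in_action': 1,
    'players': [
        {'id': 0, 'name': 'Opponent'},
        {'id': 1, 'name': 'Game On'},   # matches own_player in the test
    ],
}
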
Code Example #6
File: main.py  Project: BEMELON/2018-AI-Study-Seminar
def trainNetwork(s, readout, h_fc1, sess):
    
    a = tf.placeholder("float", [None, 5])
    y = tf.placeholder("float", [None])
    readout_action = tf.reduce_sum(tf.multiply(readout, a), reduction_indices=1)
    cost = tf.reduce_mean(tf.square(y - readout_action))
    train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)

    game_state = game.GameState()
    D = deque()
    s_t = init_env_data(game_state)
    saver = save_and_load_network(sess)
    epsilon = INITIAL_EPSILON
    terminal = False
    t = 0
    
    while True:
        readout_t = readout.eval(feed_dict={s : [s_t]})[0]
        a_t = np.zeros([5])
        action_index = 0

        if t % FRAME_PER_ACTION == 0:
            act_with_greedy_policy(epsilon, readout_t, a_t)
        else:
            a_t[0] = 1 # do nothing

        if epsilon > FINAL_EPSILON and t > OBSERVE:
            epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE

        s_t1, r_t, terminal = update_env_by_action(game_state, s_t, a_t)
        
        D.append((s_t, a_t, r_t, s_t1, terminal))

        if len(D) > REPLAY_MEMORY:
            D.popleft()


        if t > OBSERVE:
            # sample a minibatch of training data from the replay deque D
            minibatch = random.sample(D, BATCH)
            train_network_by_batch(minibatch, readout, train_step, s, a, y)
            
        s_t = s_t1
        t += 1
        
        # save progress every 10000 iterations
        if t % 10000 == 0:
            saver.save(sess, 'save_networks/' + GAME + '-dqn', global_step = t)
        
        print_info(t, epsilon, action_index, r_t, readout_t)
Code Example #7
File: play.py  Project: man-of-code/Learner
def play(model):

    score = 0
    game_state = game.GameState()
    _, state = game_state.frame_step(2)

    while True:
        score += 1

        action = np.argmax(model.predict(state, batch_size=1))

        _, state = game_state.frame_step(action)

        if score % 1000 == 0:
            print("Current Score : %d frames." % score)
Code Example #8
 def protoGameToGame(self):
     g = game.GameState()
     for team in self.proto.game.teams:
         t = game.Team(team.teamID)
         g.teams[team.teamID] = t
         for soldier in team.soldiers:
             t.soldiers[soldier.soldierID] = protoSoldierToSoldier(soldier)
     g.turnNumber = self.proto.game.turnNumber
     g.activeTeamID = self.proto.game.activeTeamID
     g.activeSoldierID = self.proto.game.activeSoldierID
     g.battlefield = game.Battlefield(self.proto.game.battlefield.width,
                                      self.proto.game.battlefield.height)
     for x, t in enumerate(self.proto.game.battlefield.tiles):
         g.battlefield.array[x // self.proto.game.battlefield.height][
             x % self.proto.game.battlefield.width] = t.tile
     return g
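
Note the unflattening arithmetic: the first index is computed with x // height but the second with x % width, which only agree when the battlefield is square. If the flat tiles list is stored column by column (i = col * height + row, matching array[x][y] in Code Example #9 below), a consistent decode would be:

# Sketch of a consistent decode, assuming column-major flattening; whether
# the protobuf actually uses that storage order is an assumption.
h = self.proto.game.battlefield.height
for i, t in enumerate(self.proto.game.battlefield.tiles):
    g.battlefield.array[i // h][i % h] = t.tile
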
Code Example #9
File: creation.py  Project: anttisalonen/spaeher
def generateGameState(config):
    random.seed(21)
    g = game.GameState(100)
    getpos = lambda x, y: config.getInitialSoldierPosition(x, y)
    for x in xrange(config.numTeams):
        t = game.Team(x)
        t.generateSoldiers(config.numSoldiers[x], getpos)
        g.teams[t.teamID] = t
    g.activeSoldierID = g.teams[g.activeTeamID].soldiers[0].soldierID

    g.battlefield = game.Battlefield(config.bfwidth, config.bfheight)
    for x in xrange(g.battlefield.width):
        for y in xrange(g.battlefield.height):
            g.battlefield.array[x][y] = random.randint(game.Tile.grass,
                                                       game.Tile.tree)

    return g
Code Example #10
File: project5.py  Project: poorvanene/columns
    def run(self) -> None:
        '''main user interface'''
        T = (127, 255, 212)
        R = (255, 64, 64)
        P = (255, 20, 147)
        Y = (255, 215, 0)
        G = (69, 139, 0)
        B = (100, 149, 237)
        L = (191, 62, 255)

        grid = game.startgrid()
        g = game.GameState()
        f = self.randomfaller()
        self.f = f
        faller = game.Faller(f[0], grid, f[1], f[2], f[3], g)
        self.faller = faller
        pygame.init()
        self.resizesurface((750, 750))
        clock = pygame.time.Clock()

        t = 0
        while self.running:
            clock.tick(200)
            if self.faller.state == 'done':
                surface = pygame.display.get_surface()
                surface.fill(pygame.Color(0, 0, 0))
                pygame.font.init()
                myfont = pygame.font.SysFont('Arial', 100, 1, 1)
                textsurface = myfont.render('GAME OVER', True, (255, 215, 0))
                surface.blit(textsurface, (137, 205))
                pygame.display.flip()
            self.handle_events()
            if t % 60 == 0:
                if self.faller.state == 'ready':
                    newgrid = self.faller.copy
                    f = self.randomfaller()
                    self.f = f
                    faller = game.Faller(f[0], newgrid, f[1], f[2], f[3], g)
                    self.faller = faller
                self.faller.tick()
                self.redraw(self.faller.copy)
            t = t + 1

        pygame.quit()
Code Example #11
def play_game(drawless, player_controllers):
    game = gofish.GameState(player_controllers, drawless=drawless)

    player_1_name = player_controllers[0].get_name()
    player_2_name = player_controllers[1].get_name()
    if has_human:
        print(f'Player 1 played by {player_1_name}')
        print(f'Player 2 played by {player_2_name}')

    while game.winner == -1:
        game.step()
    if game.winner != 0:
        if has_human:
            print(
                f'Winner: Player {game.winner}, {player_controllers[game.winner-1].get_name()}'
            )
        player_controllers[game.winner - 1].wins += 1
    elif has_human:
        print('Draw!')
    return game.winner
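
play_game() itself touches only a small surface of each controller: get_name() and a wins counter (gofish.GameState presumably calls more during step(), and has_human is a module-level flag assumed to be defined elsewhere). A minimal stub covering just that surface, useful for smoke-testing the loop; the class name and shape are assumptions:

# Hypothetical stub controller; it does not implement whatever methods
# gofish.GameState.step() invokes internally.
class StubController:
    def __init__(self, name):
        self._name = name
        self.wins = 0

    def get_name(self):
        return self._name
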
Code Example #12
    def tick(self, controller):
        # Update all objects in the scene
        #
        scene_itr = self.scene.copy()
        for (_, obj) in scene_itr.items():
            obj.update(self.scene, controller)

        self.pause += 1
        if self.pause > 50:
            if controller.key_pressed(C_TRIGGER):
                return game.GameState()

            if controller.key_pressed(K_h):
                return highscore.HighScoreState(0, 0)

            if controller.key_pressed(K_c):
                return credits.CreditsState()

        # stop asteroids from running out
        if self.pause > 3600:
            self.reset()
Code Example #13
def main():
    global logger
    global game_state
    game_state = game.GameState(1, 1)
    #tornado.options.parse_command_line()

    parse_args()

    if args.verbose:
        tornado.options.enable_pretty_logging()
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)

    application = tornado.web.Application([
        (r"/data0", DataHandler0),
        (r"/data1", DataHandler1),
        (r"/status", StatusDataHandler),
    ], )

    print "Listening on %s:%s" % (args.listen_interface, args.listen_port)
    application.listen(args.listen_port, args.listen_interface)
    tornado.ioloop.IOLoop.instance().start()
Code Example #14
    def __init__(self, window, players=None):
        # Avoid a mutable default argument: build the default player pair per call
        if players is None:
            players = [game.GamePlayer(), game.GamePlayer()]
        self.state = game.GameState()
        self.state.new_game()
        self.click_state = 0
        self.color_to_move = color_translate(self.state.next_player)
        self.first_point = None
        self.first_line = None
        self.second_point = None
        self.second_line = None
        self.areas = None
        self.area_choice = None

        self.game_area = self.state.get_game_area()

        self.area_cache = []
        self.last_area = None
        self.hover_score = 0.0

        self.window = window
        self.zoom = 50
        self.pan = [100, 125]

        self.intro_state = 0
        self.players = players
Code Example #15
def start(mode, mp, mn, use_model):
    if mode not in ['train', 'test']:
        raise ValueError('Unknown mode!')
    '''
        Configuration
    '''
    observe_steps = 5
    memory_size = 1000
    epoch = 5000  # Game
    use_pretrained_model = use_model
    save_model_path = mp
    save_model_name = mn
    pretrained_model_path = save_model_path + save_model_name
    log_path = './log/'
    init_epsilon = 0.1
    final_epsilon = 0.0001
    frame_per_action = 1
    epsilon = init_epsilon if mode == 'train' else 0
    init_learning_rate = 1e-6
    batch_size = 2
    start_epoch = 1
    gamma = 0.99
    history_size = 4
    max_window_size = 128
    random.seed(1e6)
    use_expectimax = True

    # for expectiMax
    # ex_max: detail data of expectiMax
    ex_max = np.zeros((1, 6))
    # height_proba: the probability of up action and down action.
    height_proba = np.zeros((1, 3))
    #
    step = 60

    if not os.path.exists(save_model_path):
        os.makedirs(save_model_path)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    '''
        Build the network
    '''
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    net = DQN().to(device).float()
    loss_func = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(),
                                lr=init_learning_rate,
                                momentum=0.9)
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size = 1000, gamma = 0.1)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[500, 2000],
                                                     gamma=0.5)
    # Read the pretrained model
    if use_pretrained_model:
        checkpoint = torch.load(pretrained_model_path)
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        start_epoch = checkpoint['epoch'] + 1
        # Cuda
        if torch.cuda.is_available():
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
        print("Load the pretrained model from %s successfully!" %
              pretrained_model_path)
    else:
        weight_init(net)
        print("First time training!")
    '''
        Data structures
    '''
    flappybird = game.GameState()
    memory_replay = deque()
    # No action
    aidx = 0
    action = np.zeros([2], dtype=np.float32)
    action[aidx] = 1
    img, reward, terminate = flappybird.frame_step(aidx)
    curr_height = flappybird.playery
    img = bgr2gray(img)
    # img_seq: 4x84x84
    img_seq = np.stack([img for _ in range(history_size)], axis=0)
    '''
        Start the game
    '''
    # Train DQN
    if mode == 'train':
        net.train()
        stage = 'OBSERVE'
        writer = SummaryWriter(log_path)
        print('Start training...')
        for e in range(start_epoch, epoch + start_epoch):
            per_game_memory = deque()
            while True:
                # img_seq_ts = data2tensor(img_seq, device).unsqueeze(0)
                img_seq_ts = Variable(
                    torch.from_numpy(img_seq).unsqueeze(0).to(device))
                pred = net(img_seq_ts)

                # Take an action
                idx, action = 0, np.zeros([2], dtype=np.float32)  # two actions: [no-op, flap]
                if e % frame_per_action == 0:
                    if stage == 'OBSERVE':
                        idx = 0 if random.random() < 0.9 else 1
                    else:
                        # Epsilon greedy policy
                        if random.random() <= epsilon:
                            idx = 0 if random.random() < 0.9 else 1
                        else:
                            idx = torch.argmax(pred, dim=1).item()
                else:
                    idx = 0
                action[idx] = 1

                # Scale down epsilon
                epsilon -= (init_epsilon - final_epsilon) / 2000000

                # Run an action
                img_next, reward, terminate = flappybird.frame_step(idx)
                curr_height = flappybird.playery
                # img_next = data2tensor(bgr2gray(img_next), device).unsqueeze(0)
                img_next = bgr2gray(img_next)
                img_seq_next = np.stack(
                    [img_next, img_seq[0], img_seq[1], img_seq[2]], axis=0)

                # Update the memory
                # memory_replay.append([img_seq, img_seq_next, action, reward, terminate])
                per_game_memory.append([
                    img_seq, img_seq_next, action, reward, terminate,
                    curr_height
                ])
                # if len(memory_replay) > memory_size:
                #     memory_replay.popleft()

                if e <= start_epoch + observe_steps and e % 1000 == 0:
                    print('Finish %d observations!' % e)

                # Train after observation
                if e > start_epoch + observe_steps:
                    stage = 'TRAINING'
                    # Get all history of 'batch_size' games
                    batch = random.sample(memory_replay, batch_size)

                    # Get the game history
                    img_seq_b, img_seq_next_b, action_b, reward_b, terminate_b, curr_height_b = [], [], [], [], [], []
                    for b in batch:
                        ib, inb, ab, rb, tb, chb = zip(*b)
                        img_seq_b.append(ib)
                        img_seq_next_b.append(inb)
                        action_b.append(ab)
                        reward_b.append(rb)
                        terminate_b.append(tb)
                        curr_height_b.append(chb)
                    reward_b_window = np.concatenate(reward_b, axis=0)
                    terminate_b_window = np.concatenate(terminate_b, axis=0)

                    # Randomly sample if there are too many frames in total.
                    all_frames_num = terminate_b_window.shape[0]
                    window = np.array(
                        random.sample(list(range(all_frames_num)),
                                      max_window_size)
                    ) if all_frames_num > max_window_size else np.array(
                        list(range(all_frames_num)))
                    reward_b_window = np.take(reward_b_window, window, axis=0)
                    terminate_b_window = np.take(terminate_b_window,
                                                 window,
                                                 axis=0)

                    # Get the image states
                    img_seq_b_ts = Variable(
                        torch.from_numpy(
                            np.take(np.concatenate(img_seq_b, axis=0),
                                    window,
                                    axis=0)).to(device))
                    img_seq_next_b_ts = Variable(
                        torch.from_numpy(
                            np.take(np.concatenate(img_seq_next_b, axis=0),
                                    window,
                                    axis=0)).to(device))

                    if use_expectimax:
                        # NOTE: seq[:][:-1] is the same as seq[:-1]: it drops the
                        # last sampled game, not the last frame of each game
                        s_mat = terminate_b[:][:-1]
                        h_mat = curr_height_b[:][:-1]
                        a_mat = action_b[:][:-1]
                        ex_max, height_proba = getExpectimax(
                            ex_max, height_proba, s_mat, h_mat, a_mat, step)
                        action_b_ts = torch.from_numpy(
                            getHeightDecision(ex_max, curr_height)).to(device)
                    else:
                        action_b_window = np.take(np.concatenate(action_b,
                                                                 axis=0),
                                                  window,
                                                  axis=0)
                        action_b_ts = torch.from_numpy(
                            np.array(action_b_window)).to(device)

                    # Predict
                    out = net(img_seq_b_ts)
                    out_next = net(img_seq_next_b_ts)
                    # Calculate y and q value
                    y_b = []
                    for r, t, p in zip(reward_b_window, terminate_b_window,
                                       out_next):
                        if t:
                            y_b.append(r)
                        else:
                            y_b.append(r + gamma * torch.max(p).item())
                    y_b = torch.from_numpy(np.array(
                        y_b, dtype=np.float32)).to(device)
                    q_value_b = torch.sum(out * action_b_ts, dim=1)

                    # Calculate loss and back propagation
                    optimizer.zero_grad()
                    loss = loss_func(q_value_b, y_b)
                    loss.backward()
                    optimizer.step()

                    # Print information
                    print(
                        'Epoch %d: stage = %s, loss = %.6f, Q_max = %.6f, action = %d, reward = %.3f'
                        % (e, stage, loss.item(), torch.max(pred).item(), idx,
                           reward))
                    writer.add_scalar('Train/Loss', loss.item(), e)
                    writer.add_scalar('Train/Epsilon', epsilon, e)
                    writer.add_scalar('Train/Reward', reward, e)
                    writer.add_scalar('Train/Q-Max', torch.max(pred).item(), e)

                img_seq = img_seq_next

                scheduler.step()

                if terminate:
                    break

            # Save model
            if e % 50 == 0:
                states = {
                    'epoch': e,
                    'state_dict': net.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'scheduler': scheduler.state_dict()
                }
                torch.save(states, save_model_path + save_model_name)
                print('Save the model at epoch %d successfully!' % e)

            # Update the memory
            memory_replay.append(per_game_memory)
            if len(memory_replay) > memory_size:
                memory_replay.popleft()

    # Test DQN
    else:
        net.eval()
        print('Start testing...')
        game_num = 25
        game_score = 0
        eval_best_score = 0
        all_scores = []
        for e in range(game_num):
            terminate = False
            game_score = 0
            while not terminate:
                with torch.no_grad():
                    img_seq_ts = Variable(
                        torch.from_numpy(img_seq).unsqueeze(0).to(device))
                    pred = net(img_seq_ts)
                    idx = torch.argmax(pred, dim=1).item()
                    img_next, reward, terminate = flappybird.frame_step(idx)
                    img_next = bgr2gray(img_next)
                    img_seq = np.stack(
                        [img_next, img_seq[0], img_seq[1], img_seq[2]], axis=0)
                    score = flappybird.score
                    if score > game_score:
                        game_score = score
                    if score > eval_best_score:
                        eval_best_score = score
            print('The score at game %d is %d.' % (e, game_score))
            all_scores.append(game_score)
        print('The best score is %d.' % eval_best_score)
        print('The average score is %.3f.' % np.mean(all_scores))
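
Design note: unlike the frame-level replay buffers in the other DQN examples on this page, this variant appends one deque entry per completed game (per_game_memory) and samples batch_size whole games, flattening them and subsampling at most max_window_size frames for each update. Keeping every game's frames together is presumably what lets the expectimax branch build its per-game s_mat/h_mat/a_mat sequences.
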
Code Example #16
File: learning.py  Project: ratik21/Learner
def train_net(model, neural):

    filename = (str(neural['network'][0]) + '-' + str(neural['network'][1]) +
                '-' + str(neural['batchSize']) + '-' + str(neural['buffer']))

    observe = 1000  
    epsilon = 1
    train_frames = 100000  
    batchSize = neural['batchSize']
    buffer = neural['buffer']

    max_distance = 0
    ob_distance = 0
    t = 0
    data_collect = []
    replay = []
    loss_log = []
    game_state = game.GameState()
    _, state = game_state.frame_step(2)
    start_time = timeit.default_timer()

    while t < train_frames:

        t += 1
        ob_distance += 1

        if random.random() < epsilon or t < observe:
            action = num.random.randint(0, 3) 
        else:
            qval = model.predict(state, batch_size=1)
            action = (num.argmax(qval)) 

        reward, new_state = game_state.frame_step(action)

        replay.append((state, action, reward, new_state))

        if t > observe:

            if len(replay) > buffer:
                replay.pop(0)

            minibatch = random.sample(replay, batchSize)

            # Get training values. catastrophic...................................
            X_train, y_train = process_minibatch(minibatch, model)

            history = LossHistory()
            model.fit(
                X_train, y_train, batch_size=batchSize,
                nb_epoch=1, verbose=0, callbacks=[history]
            )
            loss_log.append(history.losses)

        # Update the starting state with S'.
        state = new_state

        # Decrement epsilon over time.
        if epsilon > 0.1 and t > observe:
            epsilon -= (1.0/train_frames)

        if reward == -500:
            data_collect.append([t, ob_distance])

            if ob_distance > max_distance:
                max_distance = ob_distance

            tot_time = timeit.default_timer() - start_time
            fps = ob_distance / tot_time

            print("Max Score: %d at %d\tepsilon %f\t(%d)\t" %
                  (max_distance, t, epsilon, ob_distance))
            ob_distance = 0
            start_time = timeit.default_timer()

        if t % 25000 == 0:
            model.save_weights('models/' + filename + '-' +
                               str(t) + '.h5',
                               overwrite=True)
            print("Model Saved %s - %d" % (filename, t))

    log(filename, data_collect, loss_log)
Code Example #17
    async def on_message(self, message):
        """PRINT THE MESSAGE, IF THE MESSAGE DOES NOT START WITH PREFIX OR THE MESSAGE WAS FROM THE BOT ITSELF, RETURN"""

        print('Message from {0.author}: {0.content}'.format(message))
        if not message.content.startswith(
                "'") or message.author == client.user:
            return
        """UTILITY FUNCTIONS - KILL AND RESET GAME (TAKE PRIORITY)"""

        if message.content == "'kill":
            await message.channel.send(
                f"{message.author.mention}, I am quitting, goodbye!")
            raise SystemExit  # hacky way of quitting the application
        elif message.content == "'reset":  # reset now works
            global game_state
            game_state = game.GameState()
            await message.channel.send(
                f"{message.author.mention}, the game has been reset.")
        """ START NOT NEEDED """

        if game_state.timer is not None:
            time_elapsed = time.time() - game_state.timer
            if is_equal(message, "dukeblock"):
                pass
            elif is_equal(message, "continue"):
                if time_elapsed > 3:
                    game_state.timer = None
                    prev = ""
                    await message.channel.send(f"")
            else:
                await message.channel.send(
                    f"Sorry {message.author.mention}, that is not a valid command (must be 'dukeblock or 'continue) at this time due to a countdown initiated."
                )
        """
        print("mentions",message.mentions)
        """
        """GAME NOT STARTED"""

        if not game_state.started:
            if is_equal(message, "join"):
                found = False
                for player in game_state.players:
                    if player.discord_tag == message.author:
                        found = True
                        break
                output = ""
                if not found:  #successfully joined
                    game_state.players.append(
                        game.Player(message.author, game_state))
                    output += message.author.mention + ", added to the game.\n"
                    output += "Players: " + str(len(game_state.players))
                else:  #already joined
                    output += message.author.mention + ", you are already added.\n"
                    output += "Players: " + str(len(game_state.players))
                await message.channel.send(output)

            elif is_equal(message, "players"):
                if len(game_state.players) == 0:
                    await message.channel.send(
                        f"{message.author.mention}, there are no players in the game currently."
                    )
                    return
                output = ""
                output += f"{message.author.mention}, here are the list of players:\n"
                for player in game_state.players:
                    output += player.discord_tag.mention + "\n"
                await message.channel.send(output)
            elif is_equal(message, "start") and not game_state.started:
                game_state.channel = message.channel
                game_state.start_game()
                await message.channel.send(
                    "Game has started. Check your DMs. Player order: ")
                for player in game_state.players:
                    await message.channel.send(player.discord_tag.mention)
                    game_state.add_cards(2, player)
                    dm = await player.discord_tag.create_dm()
                    await dm.send(
                        f"**DiscordCoup - NEW GAME STARTED**\nCards: {player.get_cards()}\nCoins: {player.coins}"
                    )
            return
        """GAME IS STARTED"""

        if game_state.started:
            """COINS"""

            if is_equal(message, "coins"):
                output = "Coins: \n"
                for player in game_state.players:
                    output += f"{player.discord_tag.mention}: {player.coins}\n"
                await message.channel.send(output)
            """WAITING FOR ACTION"""

            if game_state.waiting_for_action:
                """If not player's turn"""

                if game_state.players[
                        game_state.curr_player].discord_tag != message.author:
                    await message.channel.send("Not your turn.")
                    return
                elif is_equal(message, "income"):  #income
                    if game_state.players[
                            game_state.curr_player].is_above_ten():
                        await message.channel.send(
                            "10+ coins, must perform Coup.")
                        return
                    game_state.players[game_state.curr_player].invoke_income()
                    await message.channel.send(
                        f"{message.author.mention} incomed. Coins: {game_state.players[game_state.curr_player - 1].coins}"
                    )
                    game_state.waiting_for_action = True
                elif is_equal(message, "foreignaid"):  #foreign aid
                    await message.channel.send(
                        f"{message.author.mention} claimed foreign aid. Challenge with 'c, or pass with 'p."
                    )
                    game_state.prev = "foreignaid"
                    game_state.waiting_for_action = False
                    return
                elif is_equal(message,
                              "coup") and len(message.mentions) != 1:  #coup
                    await message.channel.send("Command is: 'Coup @player")
                    return
                elif is_equal(message, "coup") and len(message.mentions) == 1:
                    if game_state.tag_to_player(message.mentions[0]) is None:
                        await message.channel.send(
                            f"{message.mentions[0].mention} is not in the game."
                        )
                        return
                    elif message.mentions[0] == message.author:
                        await message.channel.send(f"Cannot Coup yourself.")
                        return
                    elif game_state.players[game_state.curr_player].coins < 7:
                        await message.channel.send(
                            f"Need 7 coins, you have: {game_state.players[game_state.curr_player].coins}."
                        )
                        return
                    dm = await message.mentions[0].create_dm()
                    await dm.send(
                        f"You have been Couped. Discard a card. Type 'discard 0, etc.\nCurrent cards: {game_state.tag_to_player(message.mentions[0]).cards}."
                    )
                    game_state.in_conflict = message.mentions[0]
                    game_state.tag_to_player(message.author).coup()
                    await message.channel.send(
                        f"{message.author.mention} Coups --> {message.mentions[0].mention}"
                    )
                    game_state.waiting_for_action = False
                elif is_equal(message, "duke"):
                    game_state.tag_to_player(message.author).invoke_duke()
                    await message.channel.send(
                        f"{message.author.mention} calls Duke.")
                    game_state.waiting_for_action = False
                return
            """ SOMEONE IS BEING COUPED BUT SOMEONE TALKED WHO WASN'T BEING COUPOED """
            """NOT WAITING FOR ACTION"""

            if not game_state.waiting_for_action:
                """when someone does action that can be called"""
                if game_state.waiting_for_permissions:
                    if is_equal(message, "c"):  #if someone challenges
                        await message.channel.send(
                            f"{message.author.mention} challenges")
                        game_state.waiting_for_permissions = False
                        game_state.waiting_for_action = True
                        game_state.next_turn()
                        return
                    elif is_equal(message,
                                  "p"):  #if someone passed, check if added
                        found = False
                        for player in game_state.accepted_list:
                            if player.discord_tag == message.author:
                                found = True
                                break
                        output = ""
                        if not found:  #add to accepted list
                            game_state.accepted_list.append(
                                game_state.tag_to_player(message.author))
                            await message.channel.send("Passed.")
                            #check if all players has passed
                            await message.channel.send(
                                len(game_state.accepted_list))
                            await message.channel.send(len(game_state.players))
                            if len(game_state.players) == len(
                                    game_state.accepted_list):
                                await message.channel.send(
                                    "Everyone passed, action allowed.")
                                game_state.waiting_for_permissions = False
                                game_state.waiting_for_action = True
                                game_state.next_turn()
                            return

                        else:  #already accepted
                            output += message.author.mention + ", you are already added.\n"
                            await message.channel.send(
                                "You have already passed.")
                            return
                """Check player's turn"""
                """game_state.in_conflict is not None and"""

                if game_state.in_conflict != message.author:
                    await message.channel.send("Wait your turn.")
                    return
                else:
                    # TODO: implement card debt: if game_state.tag_to_player(message.author).zero_less_cards():
                    if len(message.content.split()) != 2:
                        await message.channel.send("Type 'discard [number].")
                        return
                    index_to_discard = message.content.split()[1]
                    if not index_to_discard.isdigit():
                        await message.channel.send("Type 'discard [number].")
                        return
                    index_to_discard = int(index_to_discard)
                    if 0 <= index_to_discard <= 1:  # the input was successful
                        name_of_discarded = game_state.tag_to_player(
                            message.author).cards[index_to_discard].name
                        game_state.tag_to_player(message.author).turn_over(
                            game_state.tag_to_player(
                                message.author).cards[index_to_discard])
                        await message.channel.send(
                            f"{message.author.mention} discarded {name_of_discarded}"
                        )
                        game_state.next_turn()
                        game_state.in_conflict = None
                        game_state.waiting_for_action = True
                        print(game_state.tag_to_player(message.author).cards)
                        return
                    else:
                        await message.channel.send("Invalid number")
                        return
Code Example #18
def handle_message(event):
    if(event.source.type == 'user'):
        id = event.source.user_id
        profile = line_bot_api.get_profile(id)
    elif(event.source.type == 'room'):
        id = event.source.room_id
        profile = line_bot_api.get_room_member_profile(id, event.source.user_id)
    elif(event.source.type == 'group'):
        id = event.source.group_id
        profile = line_bot_api.get_group_member_profile(id, event.source.user_id)

    command = commandParser.splitCommand(event.message.text)
    if(command == None):
        return None

    if id in stateList:
        state = stateList[id]
    else:
        state = game.GameState()
        stateList[id] = state

    reply = []
    name = profile.display_name

    if(command == 'play'):
        ret = game.gameStart(state, id)
        if(ret == 0):
            reply.append(TextSendMessage(text="== Start Game ==\nWho's That Pokemon?\nStart command with colon\n:play = play game\n:end = end game\n:score = view score\n:<awnser> = awnser Ex. :pikachu"))
        elif(ret == 1):
            reply.append(TextSendMessage(text="@%s\nGame already started"%name))
            line_bot_api.reply_message(
                event.reply_token, reply)
            return None
        elif(ret == 2):
            reply.append(TextSendMessage(text="@%s\nOther room playing, Please Wait"%name))
            line_bot_api.reply_message(
                event.reply_token, reply)
            return None
    elif(command == 'end'):
        ret = game.gameEnd(state, id)
        if(ret == 0):
            winner = ""
            winScore = 0
            text = "== End Game =="
            for key in state.score:
                text = text+"\n%s --> %d Points"%(key,state.score[key])
                if(state.score[key] > winScore):
                    winScore = state.score[key]
                    winner = key
            text = text + "\nThe winner is [%s] !"%winner
            reply.append(TextSendMessage(text=text))
            line_bot_api.reply_message(
                event.reply_token, reply)
            return None

    if(state.progress == 1):
        game.getQuestion(state)
        reply.append(ImageSendMessage(original_content_url=state.path, preview_image_url=state.path))
        reply.append(TextSendMessage(text=state.awnsered))
    elif(state.progress == 2):
        if(command == 'score'):
            text="=== Score ==="
            for key in state.score:
                text = text+"\n%s --> %d Points"%(key,state.score[key])
            reply.append(TextSendMessage(text=text))
            line_bot_api.reply_message(
                event.reply_token, reply)
            return None

        ret = game.awnserQuestion(state, command, name, id)
        if(ret == 1):
            reply.append(TextSendMessage(text="@%s\nOther room playing, Please Wait"%name))
            line_bot_api.reply_message(
                event.reply_token, reply)
            return None

        text = "@%s\n"%name + state.awnsered
        if(game.isCorret(state)):
            text = text + " --> CORRECT!\nScore = %d Points\nNext Question"%state.score[name]
            reply.append(StickerSendMessage(package_id='2', sticker_id='144'))
            reply.append(TextSendMessage(text=text))
            if(state.isEnd == 1):
                ret = game.gameEnd(state, id)
                if(ret == 0):
                    winner = ""
                    winScore = 0
                    text = "== End Game =="
                    for key in state.score:
                        text = text+"\n%s --> %d Points"%(key,state.score[key])
                        if(state.score[key] > winScore):
                            winScore = state.score[key]
                            winner = key
                    text = text + "\nThe winner is [%s] !"%winner
                    reply.append(TextSendMessage(text=text))
                    line_bot_api.reply_message(
                        event.reply_token, reply)
                    return None

            game.gameRestart(state)
            game.getQuestion(state)
            reply.append(ImageSendMessage(original_content_url=state.path, preview_image_url=state.path))
            reply.append(TextSendMessage(text=state.awnsered))
        else:
            text = text + " --> WRONG!"
            if(state.hint == 1):
                text = text + " --> HINT!"
            reply.append(TextSendMessage(text=text))
    if(len(reply) > 0):
        line_bot_api.reply_message(
            event.reply_token, reply)
Code Example #19
# The main loop
while mode != game.Mode.exiting:
    # If the active state object has switched modes internally but the outer
    # loop has not caught up yet, sync `mode` and push the replacement state.
    if states[-1].state != mode:
        mode = states[-1].state

        # Dispatch on the new mode, one case at a time
        if states[-1].state == game.Mode.menu:
            # Check for menu
            states.append(game.MenuState(surface, rmanager))
        elif states[-1].state == game.Mode.freeplay:
            # Check for playing
            states.append(game.GameState(surface, rmanager,
                                         game.Mode.freeplay))
        elif states[-1].state == game.Mode.sandbox:
            # Check for sandbox mode
            states.append(game.GameState(surface, rmanager, game.Mode.sandbox))
        elif states[-1].state == game.Mode.exiting:
            # Check for exit
            pass
        else:
            # Error: unknown/unimplemented state
            states.append(
                game.ErrorState(
                    surface, rmanager,
                    'Error: Unimplemented state. Please contact author of program.'
                ))

        # Remove the now-superseded previous state
Code Example #20
def trainNetwork(s, readout, h_fc1, sess):
    # define the cost function
    a = tf.compat.v1.placeholder("float", [None, ACTIONS])
    y = tf.compat.v1.placeholder("float", [None])
    readout_action = tf.reduce_sum(tf.multiply(readout, a),
                                   reduction_indices=1)
    cost = tf.reduce_mean(tf.square(y - readout_action))
    train_step = tf.compat.v1.train.AdamOptimizer(1e-6).minimize(cost)

    # open up a game state to communicate with emulator
    game_state = game.GameState()

    # store the previous observations in replay memory
    D = deque()

    # printing
    a_file = open("logs_" + GAME + "/readout.txt", 'w')
    h_file = open("logs_" + GAME + "/hidden.txt", 'w')

    # get the first state by doing nothing and preprocess the image to 80x80x4
    do_nothing = np.zeros(ACTIONS)
    do_nothing[0] = 1
    x_t, r_0, terminal = game_state.frame_step(do_nothing)
    x_t = cv2.cvtColor(cv2.resize(x_t, (80, 80)), cv2.COLOR_BGR2GRAY)
    ret, x_t = cv2.threshold(x_t, 1, 255, cv2.THRESH_BINARY)
    s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)

    # saving and loading networks
    saver = tf.compat.v1.train.Saver()
    sess.run(tf.compat.v1.global_variables_initializer())
    checkpoint = tf.train.get_checkpoint_state("saved_networks")
    if checkpoint and checkpoint.model_checkpoint_path:
        saver.restore(sess, checkpoint.model_checkpoint_path)
        print("Successfully loaded:", checkpoint.model_checkpoint_path)
    else:
        print("Could not find old network weights")

    # start training
    epsilon = INITIAL_EPSILON
    t = 0
    while "flappy bird" != "angry bird":
        # choose an action epsilon greedily
        readout_t = readout.eval(feed_dict={s: [s_t]})[0]
        a_t = np.zeros([ACTIONS])
        action_index = 0
        if t % FRAME_PER_ACTION == 0:
            if random.random() <= epsilon:
                print("----------Random Action----------")
                action_index = random.randrange(ACTIONS)
                a_t[random.randrange(ACTIONS)] = 1
            else:
                action_index = np.argmax(readout_t)
                a_t[action_index] = 1
        else:
            a_t[0] = 1  # do nothing

        # scale down epsilon
        if epsilon > FINAL_EPSILON and t > OBSERVE:
            epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE

        # run the selected action and observe next state and reward
        x_t1_colored, r_t, terminal = game_state.frame_step(a_t)
        x_t1 = cv2.cvtColor(cv2.resize(x_t1_colored, (80, 80)),
                            cv2.COLOR_BGR2GRAY)
        ret, x_t1 = cv2.threshold(x_t1, 1, 255, cv2.THRESH_BINARY)
        x_t1 = np.reshape(x_t1, (80, 80, 1))
        #s_t1 = np.append(x_t1, s_t[:,:,1:], axis = 2)
        s_t1 = np.append(x_t1, s_t[:, :, :3], axis=2)

        # store the transition in D
        D.append((s_t, a_t, r_t, s_t1, terminal))
        if len(D) > REPLAY_MEMORY:
            D.popleft()

        # only train if done observing
        if t > OBSERVE:
            # sample a minibatch to train on
            minibatch = random.sample(D, BATCH)

            # get the batch variables
            s_j_batch = [d[0] for d in minibatch]
            a_batch = [d[1] for d in minibatch]
            r_batch = [d[2] for d in minibatch]
            s_j1_batch = [d[3] for d in minibatch]

            y_batch = []
            readout_j1_batch = readout.eval(feed_dict={s: s_j1_batch})
            for i in range(0, len(minibatch)):
                terminal = minibatch[i][4]
                # if terminal, only equals reward
                if terminal:
                    y_batch.append(r_batch[i])
                else:
                    y_batch.append(r_batch[i] +
                                   GAMMA * np.max(readout_j1_batch[i]))

            # perform gradient step
            train_step.run(feed_dict={y: y_batch, a: a_batch, s: s_j_batch})

        # update the old values
        s_t = s_t1
        t += 1

        # save progress every 10000 iterations
        if t % 10000 == 0:
            saver.save(sess, 'saved_networks/' + GAME + '-dqn', global_step=t)

        # print info
        state = ""
        if t <= OBSERVE:
            state = "observe"
        elif t > OBSERVE and t <= OBSERVE + EXPLORE:
            state = "explore"
        else:
            state = "train"

        print("TIMESTEP", t, "/ STATE", state, \
            "/ EPSILON", epsilon, "/ ACTION", action_index, "/ REWARD", r_t, \
            "/ Q_MAX %e" % np.max(readout_t))
        # write info to files (omitted in this excerpt)
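
The epsilon schedule here is a plain linear anneal: once t exceeds OBSERVE, each step subtracts (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE, so epsilon reaches FINAL_EPSILON exactly EXPLORE steps after observation ends. A standalone sketch with illustrative constants (the real values are defined elsewhere in the file and may differ):

# Illustrative constants only; the project defines its own values elsewhere.
OBSERVE, EXPLORE = 10000, 2000000
INITIAL_EPSILON, FINAL_EPSILON = 0.1, 0.0001

def epsilon_at(t):
    """Exploration rate after t timesteps under the linear anneal above."""
    if t <= OBSERVE:
        return INITIAL_EPSILON
    annealed = INITIAL_EPSILON - (t - OBSERVE) * (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
    return max(annealed, FINAL_EPSILON)

assert epsilon_at(0) == INITIAL_EPSILON
assert abs(epsilon_at(OBSERVE + EXPLORE) - FINAL_EPSILON) < 1e-9
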
Code Example #21
import game, defs

state = game.GameState()
state.draw_board()

while not state.game_ended():
    turn_str = "White" if state.turn == defs.WHITE else "Black"
    move_str = input(turn_str + " to move: ")
    move = state.parse_check_move(move_str)
    state.move_piece(move)
    state.draw_board()

print("Game over")
Code Example #22
File: common.py  Project: bjones3/webMinigame
 def save_state(self, slug, gs_data):
     assert gs_data.get('password') is not None
     self._data[slug] = game.GameState(dict(gs_data))