def erase_column(self, player):
    """Remove completed columns from *player*'s board.

    A column is removed when its three cards are all revealed
    (not hidden) and share the same non-zero value.  The three cards
    are replaced on the discard pile (``self.defausse``) and the
    board slots are zeroed out.

    Parameters
    ----------
    player : PLAYER
        Player whose board is scanned for completed columns.

    Returns
    -------
    bool
        True if at least one column was erased, False otherwise.
        (The original docstring said "None"; the code has always
        returned the flag.)
    """
    flag = False
    for i in range(4):
        column = [player.board[row][i] for row in range(3)]
        # All three values equal, every card revealed, and the value
        # is not the 0 placeholder used for already-erased slots.
        if (column[0].value == column[1].value == column[2].value
                and not any(card.hidden for card in column)
                and column[0].value != 0):
            # Discard three copies of the erased value, then clear the column.
            for _ in range(3):
                self.defausse.append(Card(column[0].value))
            for card in column:
                card.value = 0
            flag = True
    return flag
def create_initial_deck_and_market():
    """Build the full deck, carve out the opening market, and append the stage-3 card.

    Returns
    -------
    tuple
        ``(initial_market, rest)``: a Market built from the first 9
        cards of the deck, and the remaining cards with the
        stage-three card appended at the end.
    """
    # Whole deck, built from the configured card list.
    full_deck = Game.create_initial_deck([Card(value) for value in config.get('cards')])
    # The first nine cards seed the initial market.
    market = Market(full_deck[:9])
    draw_pile = full_deck[9:]
    # The phase-3 card is added to the remainder.
    draw_pile.append(Card(config.get('stage_three_card')))
    return market, draw_pile
def setBoard(self):
    """Build the 40-square board arrays (cards and type codes).

    Type codes: 0 = property, 1 = community chest, 2 = chance,
    3 = special position (GO, Jail, ...).  Only the special
    positions are assigned here; population of the other card
    types (previously present as commented-out code duplicating
    ``initialiseBoard``) has been removed as dead code.

    NOTE(review): ``b`` and ``t`` are locals that are discarded when
    the method returns, so this method currently has no observable
    effect.  It appears to be superseded by ``initialiseBoard`` —
    confirm before relying on it.
    """
    b = [Card() for _ in range(40)]  # one empty card per board square
    t = [-1] * 40                    # -1 == position not yet assigned
    # Every position left unassigned is a special position
    # (GO, Jail, etc.); what happens there is handled elsewhere.
    for i in range(len(b)):
        if t[i] < 0:
            t[i] = 3
            b[i] = SpecialPositionCard()
def initialiseBoard(self):
    """Populate the 40-square board and install it via ``setBoard``.

    Type codes: 0 = property, 1 = community chest, 2 = chance,
    3 = special position (GO, Jail, ...).
    """
    squares = [Card() for _ in range(40)]  # empty card array
    types = [-1] * 40                      # -1 == not yet assigned
    # Property cards sit on their configured positions.
    for prop in self.getCards():
        position = int(prop.getPosition())
        squares[position] = prop
        types[position] = 0
    # Community chest squares.
    for raw_pos in self.getCommunityCardPositions():
        position = int(raw_pos)
        squares[position] = CommandCard()
        types[position] = 1
    # Chance squares.
    for raw_pos in self.getChanceCardPositions():
        position = int(raw_pos)
        squares[position] = CommandCard()
        types[position] = 2
    # Every position still unassigned is a special position
    # (GO, Jail, etc.); its behaviour is handled in a different method.
    for index, code in enumerate(types):
        if code < 0:
            types[index] = 3
            squares[index] = SpecialPositionCard()
    # Install the finished board globally.
    self.setBoard(Board(squares, types))
def reset(self):
    """Reset the Skyjo environment to a fresh game.

    Mirrors the state set up in ``__init__`` and rebuilds the
    initial observation.

    Returns
    -------
    numpy.ndarray
        The initial observation vector.
    """
    # Wipe all per-episode state.
    self.defausse = []
    self.history = []
    self.cards_thrown = []
    self.state = 0
    self.reward = 0
    self.not_done = False
    self.unfinished = True
    self.deck_card = Card(5)
    self.columns_made = []
    self.reward2 = 0
    # Rebuild the deck with the real game's distribution:
    # 5x -2, 10x -1, 15x 0 and ten copies of each value 1..12.
    values = [-2] * 5 + [-1] * 10 + [0] * 15
    values += [v for v in range(1, 13) for _ in range(10)]
    self.deck = [Card(v) for v in values]
    self.setup()
    # Observation = [top of discard, state, deck card] + own board
    # (+ the opponent's board, without booleans, in the 2-player case).
    if self.num_players == 1:
        low = [-2, 0, -2] + [-2] * 12
        high = [12, 100, 12] + [12] * 12
        self.observation_space = spaces.Box(low=np.array(low), high=np.array(high))
        board_int, _board_bool = self.players[0].get_board_as_int(self.mean_value_deck())
        head = np.array([self.defausse[-1].value, self.state, self.deck_card.value])
        self.observation = np.concatenate((head, board_int))
        # Variant with boolean board appended was tried and disabled:
        # self.observation = np.concatenate((self.observation, board_bool))
    elif self.num_players == 2:
        # Board of the opponent included, without the hidden-card booleans.
        # (Earlier variants: boolean boards, or only the opponent's
        # score/remaining tiles — kept out of the observation.)
        low = [-2, 0, -2] + [-2] * 12 + [-2] * 12
        high = [12, 100, 12] + [12] * 12 + [12] * 12
        self.observation_space = spaces.Box(low=np.array(low), high=np.array(high))
        board_int, _board_bool = self.players[0].get_board_as_int(self.mean_value_deck())
        head = np.array([self.defausse[-1].value, self.state, self.deck_card.value])
        self.observation = np.concatenate((head, board_int))
        opponent_int, _ = self.players[1].get_board_as_int(self.mean_value_deck())
        self.observation = np.concatenate((self.observation, opponent_int))
    return self.observation
def __init__(self, players, human_mode=False):
    """Initialize the Skyjo environment.

    Parameters
    ----------
    players : List
        List containing player instances.
    human_mode : bool, optional
        Whether a human is involved in the game (default False).
    """
    self.players = players              # players taking part in the game
    # A turn may consist of 2 actions; not_done is True while the
    # current player still has an action left to play.
    self.not_done = False
    self.drew = False
    self.num_players = len(players)     # number of players
    self.action_space = spaces.MultiDiscrete([2, 13])
    # Global identifiers for the three possible actions.
    self.take = 0
    self.draw = 1
    self.throw = 2
    self.defausse = []                  # discard pile (list of cards)
    self.deck_card = Card(5)            # first deck card
    self.reward = 0
    self.state = 0
    self.unfinished = True
    self.cards_thrown = []
    self.human_mode = human_mode
    self.columns_made = []
    self.reward2 = 0
    # Deck initialized as in the real game:
    # 5x -2, 10x -1, 15x 0 and ten copies of each value 1..12.
    distribution = [-2] * 5 + [-1] * 10 + [0] * 15
    distribution += [v for v in range(1, 13) for _ in range(10)]
    self.deck = [Card(v) for v in distribution]
    self.deck_copy = self.deck.copy()
    self.setup()                        # initialize the environment
    # Observation space, created respecting the gym API.  The
    # single-player layout is set first, then refined per player count
    # below (this first assignment is kept for backward compatibility).
    low = [-2, 0, -2] + [-2] * 12
    high = [12, 100, 12] + [12] * 12
    self.observation_space = spaces.Box(low=np.array(low), high=np.array(high))
    if self.num_players == 1:
        self.observation_space = spaces.Box(low=np.array(low), high=np.array(high))
        board_int, _board_bool = self.players[0].get_board_as_int(self.mean_value_deck())
        head = np.array([self.defausse[-1].value, self.state, self.deck_card.value])
        self.observation = np.concatenate((head, board_int))
        # Boolean-board variant tried and disabled:
        # self.observation = np.concatenate((self.observation, board_bool))
    elif self.num_players == 2:
        # Opponent board included, without the hidden-card booleans.
        # (Earlier variants with boolean boards or only the opponent's
        # score were tried and kept out.)
        low = low + [-2] * 12
        high = high + [12] * 12
        self.observation_space = spaces.Box(low=np.array(low), high=np.array(high))
        board_int, _board_bool = self.players[0].get_board_as_int(self.mean_value_deck())
        head = np.array([self.defausse[-1].value, self.state, self.deck_card.value])
        self.observation = np.concatenate((head, board_int))
        opponent_int, _ = self.players[1].get_board_as_int(self.mean_value_deck())
        self.observation = np.concatenate((self.observation, opponent_int))
def __init__(self, players, human_mode=False):
    """Initialize the Skyjo environment.

    Parameters
    ----------
    players : List
        List containing player instances.
    human_mode : bool, optional
        Whether a human is involved in the game (default False).
    """
    self.players = players              # players taking part in the game
    # A turn may consist of 2 actions; True while the current
    # player still has an action left to play.
    self.not_done = False
    self.drew = False
    self.num_players = len(players)     # number of players
    self.action_space = spaces.MultiDiscrete([2, 13])
    # Global identifiers for the three possible actions.
    self.take = 0
    self.draw = 1
    self.throw = 2
    self.defausse = []                  # discard pile (list of cards)
    self.deck_card = Card(5)            # first deck card
    self.reward = 0
    self.state = 0
    self.cards_thrown = []
    self.testing = False                # enables the tkinter display below
    self.cards_known = []
    self.unfinished = True
    self.human_mode = human_mode
    self.columns_made = []
    self.reward2 = 0
    # Deck initialized as in the real game:
    # 5x -2, 10x -1, 15x 0 and ten copies of each value 1..12.
    distribution = [-2] * 5 + [-1] * 10 + [0] * 15
    distribution += [v for v in range(1, 13) for _ in range(10)]
    self.deck = [Card(v) for v in distribution]
    self.deck_copy = self.deck.copy()
    self.setup()                        # initialize the environment
    # Observation space, created respecting the gym API.
    if self.num_players == 1:
        low = [-2, 0, -2] + [-2] * 12
        high = [12, 100, 12] + [12] * 12
        self.observation_space = spaces.Box(low=np.array(low), high=np.array(high))
        board_int, _board_bool = self.players[0].get_board_as_int(self.mean_value_deck())
        head = np.array([self.defausse[-1].value, self.state, self.deck_card.value])
        self.observation = np.concatenate((head, board_int))
        # Boolean-board variant tried and disabled:
        # self.observation = np.concatenate((self.observation, board_bool))
    elif self.num_players == 2:
        # Own board + opponent board, without the hidden-card booleans.
        # (Earlier variants with boolean boards, or only the opponent's
        # score and remaining tiles, were tried and kept out.)
        low = [-2, 0, -2] + [-2] * 12 + [-2] * 12
        high = [12, 100, 12] + [12] * 12 + [12] * 12
        self.observation_space = spaces.Box(low=np.array(low), high=np.array(high))
        board_int, _board_bool = self.players[0].get_board_as_int(self.mean_value_deck())
        head = np.array([self.defausse[-1].value, self.state, self.deck_card.value])
        self.observation = np.concatenate((head, board_int))
        opponent_int, _ = self.players[1].get_board_as_int(self.mean_value_deck())
        self.observation = np.concatenate((self.observation, opponent_int))
    # Display variables from the tkinter library (dead while
    # self.testing is hard-coded to False above).
    if self.testing:
        self.root = Tk()
        self.canvas = Canvas(self.root, bg="white", height=650, width=1000)