class NFSPAgent(object):
    ''' NFSP Agent implementation in TensorFlow. '''

    def __init__(self,
                 sess,
                 scope,
                 action_num=4,
                 state_shape=None,
                 hidden_layers_sizes=None,
                 reservoir_buffer_capacity=int(1e6),
                 anticipatory_param=0.1,
                 batch_size=256,
                 train_every=1,
                 rl_learning_rate=0.1,
                 sl_learning_rate=0.005,
                 min_buffer_size_to_learn=1000,
                 q_replay_memory_size=30000,
                 q_replay_memory_init_size=1000,
                 q_update_target_estimator_every=1000,
                 q_discount_factor=0.99,
                 q_epsilon_start=0.06,
                 q_epsilon_end=0,
                 q_epsilon_decay_steps=int(1e6),
                 q_batch_size=256,
                 q_train_every=1,
                 q_mlp_layers=None,
                 evaluate_with='average_policy'):
        ''' Initialize the NFSP agent.

        Args:
            sess (tf.Session): Tensorflow session object.
            scope (string): The name scope of NFSPAgent.
            action_num (int): The number of actions.
            state_shape (list): The shape of the state space.
            hidden_layers_sizes (list): The hidden layer sizes of the average policy network.
            reservoir_buffer_capacity (int): The size of the buffer for the average policy.
            anticipatory_param (float): The hyper-parameter that balances rl/average policy.
            batch_size (int): The batch size for training the average policy.
            train_every (int): Train the SL policy every X steps.
            rl_learning_rate (float): The learning rate of the RL agent.
            sl_learning_rate (float): The learning rate of the average policy.
            min_buffer_size_to_learn (int): The minimum buffer size to learn for the average policy.
            q_replay_memory_size (int): The memory size of the inner DQN agent.
            q_replay_memory_init_size (int): The initial memory size of the inner DQN agent.
            q_update_target_estimator_every (int): The frequency of updating the target network of the inner DQN agent.
            q_discount_factor (float): The discount factor of the inner DQN agent.
            q_epsilon_start (float): The starting epsilon of the inner DQN agent.
            q_epsilon_end (float): The ending epsilon of the inner DQN agent.
            q_epsilon_decay_steps (int): The decay steps of the inner DQN agent.
            q_batch_size (int): The batch size of the inner DQN agent.
            q_train_every (int): Train the inner DQN agent every X steps.
            q_mlp_layers (list): The layer sizes of the inner DQN agent.
            evaluate_with (string): Either 'best_response' or 'average_policy'.
        '''
        self.use_raw = False
        self._sess = sess
        self._scope = scope
        self._action_num = action_num
        self._state_shape = state_shape
        self._layer_sizes = hidden_layers_sizes
        self._batch_size = batch_size
        self._train_every = train_every
        self._sl_learning_rate = sl_learning_rate
        self._anticipatory_param = anticipatory_param
        self._min_buffer_size_to_learn = min_buffer_size_to_learn

        self._reservoir_buffer = ReservoirBuffer(reservoir_buffer_capacity)
        self._prev_timestep = None
        self._prev_action = None
        self.evaluate_with = evaluate_with

        # Total timesteps
        self.total_t = 0

        # Step counter to keep track of learning.
        self._step_counter = 0

        with tf.compat.v1.variable_scope(scope):
            # Inner RL agent
            self._rl_agent = DQNAgent(
                sess, scope + '_dqn', q_replay_memory_size,
                q_replay_memory_init_size, q_update_target_estimator_every,
                q_discount_factor, q_epsilon_start, q_epsilon_end,
                q_epsilon_decay_steps, q_batch_size, action_num, state_shape,
                q_train_every, q_mlp_layers, rl_learning_rate)

            with tf.compat.v1.variable_scope('sl'):
                # Build supervised model
                self._build_model()

        self.sample_episode_policy()

    def _build_model(self):
        ''' Build the model for supervised learning. '''
        # Placeholders.
        input_shape = [None]
        input_shape.extend(self._state_shape)
        self._info_state_ph = tf.compat.v1.placeholder(
            shape=input_shape, dtype=tf.float32)
        self._X = tf.keras.layers.Flatten()(self._info_state_ph)

        # Boolean to indicate whether the network is in training mode
        self.is_train = tf.compat.v1.placeholder(tf.bool, name="is_train")

        # Batch Normalization
        self._X = tf.compat.v1.layers.batch_normalization(self._X, training=True)

        self._action_probs_ph = tf.compat.v1.placeholder(
            shape=[None, self._action_num], dtype=tf.float32)

        # Average policy network.
        fc = self._X
        for dim in self._layer_sizes:
            fc = tf.keras.layers.Dense(dim, activation=tf.tanh)(fc)
        self._avg_policy = tf.keras.layers.Dense(
            self._action_num, activation=None)(fc)
        self._avg_policy_probs = tf.nn.softmax(self._avg_policy)

        # Loss
        self._loss = tf.reduce_mean(
            input_tensor=tf.nn.softmax_cross_entropy_with_logits(
                labels=tf.stop_gradient(self._action_probs_ph),
                logits=self._avg_policy))

        optimizer = tf.compat.v1.train.AdamOptimizer(
            learning_rate=self._sl_learning_rate, name='nfsp_adam')

        update_ops = tf.compat.v1.get_collection(
            tf.compat.v1.GraphKeys.UPDATE_OPS,
            scope=tf.compat.v1.get_variable_scope().name)
        with tf.control_dependencies(update_ops):
            self._learn_step = optimizer.minimize(self._loss)

    def feed(self, ts):
        ''' Feed data to the inner RL agent.

        Args:
            ts (list): A list of 5 elements that represent the transition.
        '''
        self._rl_agent.feed(ts)
        self.total_t += 1
        if self.total_t > 0 and len(self._reservoir_buffer) >= self._min_buffer_size_to_learn \
                and self.total_t % self._train_every == 0:
            sl_loss = self.train_sl()
            print('\rINFO - Agent {}, step {}, sl-loss: {}'.format(
                self._scope, self.total_t, sl_loss), end='')

    def step(self, state):
        ''' Returns the action to be taken.

        Args:
            state (dict): The current state

        Returns:
            action (int): An action id
        '''
        obs = state['obs']
        legal_actions = state['legal_actions']
        if self._mode == MODE.best_response:
            probs = self._rl_agent.predict(obs)
            one_hot = np.eye(len(probs))[np.argmax(probs)]
            self._add_transition(obs, one_hot)
        elif self._mode == MODE.average_policy:
            probs = self._act(obs)

        probs = remove_illegal(probs, legal_actions)
        action = np.random.choice(len(probs), p=probs)

        return action

    def eval_step(self, state):
        ''' Use the average policy for evaluation.

        Args:
            state (dict): The current state.

        Returns:
            action (int): An action id.
            probs (list): The list of action probabilities.
        '''
        if self.evaluate_with == 'best_response':
            action, probs = self._rl_agent.eval_step(state)
        elif self.evaluate_with == 'average_policy':
            obs = state['obs']
            legal_actions = state['legal_actions']
            probs = self._act(obs)
            probs = remove_illegal(probs, legal_actions)
            action = np.random.choice(len(probs), p=probs)
        else:
            raise ValueError(
                "'evaluate_with' should be either 'average_policy' or 'best_response'.")
        return action, probs

    def sample_episode_policy(self):
        ''' Sample the average/best_response policy for the episode. '''
        if np.random.rand() < self._anticipatory_param:
            self._mode = MODE.best_response
        else:
            self._mode = MODE.average_policy

    def _act(self, info_state):
        ''' Predict action probabilities given the observation and legal actions.

        Args:
            info_state (numpy.array): An observation.

        Returns:
            action_probs (numpy.array): The predicted action probabilities.
        '''
        info_state = np.expand_dims(info_state, axis=0)
        action_probs = self._sess.run(
            self._avg_policy_probs,
            feed_dict={
                self._info_state_ph: info_state,
                self.is_train: False
            })[0]
        return action_probs

    def _add_transition(self, state, probs):
        ''' Add a new transition to the reservoir buffer.

        Transitions are in the form (state, probs).

        Args:
            state (numpy.array): The state.
            probs (numpy.array): The probabilities of each action.
        '''
        transition = Transition(info_state=state, action_probs=probs)
        self._reservoir_buffer.add(transition)

    def train_sl(self):
        ''' Compute the loss on sampled transitions and perform an avg-network update.

        If there are not enough elements in the buffer, no loss is computed and
        `None` is returned instead.

        Returns:
            loss (float): The average loss obtained on this batch of transitions or `None`.
        '''
        if (len(self._reservoir_buffer) < self._batch_size or
                len(self._reservoir_buffer) < self._min_buffer_size_to_learn):
            return None

        transitions = self._reservoir_buffer.sample(self._batch_size)
        info_states = [t.info_state for t in transitions]
        action_probs = [t.action_probs for t in transitions]

        loss, _ = self._sess.run(
            [self._loss, self._learn_step],
            feed_dict={
                self._info_state_ph: info_states,
                self._action_probs_ph: action_probs,
                self.is_train: True,
            })
        return loss
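# Illustrative only: a minimal sketch of how an environment loop might drive the
# NFSPAgent above. The `env` object and its `run` method are assumptions made for
# this example (an rlcard-style interface that plays one episode and returns
# per-player trajectories); adjust the names to the environment wrapper actually
# in use.
def _example_training_loop(sess, env, agent, num_episodes=1000):
    ''' Hypothetical helper showing the expected call order on NFSPAgent. '''
    sess.run(tf.compat.v1.global_variables_initializer())
    for _ in range(num_episodes):
        # Decide whether this episode is played with the best-response (DQN)
        # policy or the average (SL) policy.
        agent.sample_episode_policy()
        # Assumed API: plays one episode with the registered agents and returns
        # (trajectories, payoffs).
        trajectories, _ = env.run(is_training=True)
        for ts in trajectories[0]:
            # Feeds the inner DQN and, every `train_every` steps, trains the
            # supervised average-policy network.
            agent.feed(ts)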
class NFSPAgent(object):
    ''' NFSP Agent implementation in TensorFlow. '''

    def __init__(self,
                 sess,
                 scope,
                 action_num=4,
                 state_shape=None,
                 hidden_layers_sizes=None,
                 reservoir_buffer_capacity=int(1e6),
                 anticipatory_param=0.1,
                 batch_size=256,
                 train_every=1,
                 rl_learning_rate=0.1,
                 sl_learning_rate=0.005,
                 min_buffer_size_to_learn=1000,
                 q_replay_memory_size=30000,
                 q_replay_memory_init_size=1000,
                 q_update_target_estimator_every=1000,
                 q_discount_factor=0.99,
                 q_epsilon_start=0.06,
                 q_epsilon_end=0,
                 q_epsilon_decay_steps=int(1e6),
                 q_batch_size=256,
                 q_train_every=1,
                 q_mlp_layers=None,
                 evaluate_with='average_policy'):
        ''' Initialize the NFSP agent.

        Args:
            sess (tf.Session): Tensorflow session object.
            scope (string): The name scope of NFSPAgent.
            action_num (int): The number of actions.
            state_shape (list): The shape of the state space.
            hidden_layers_sizes (list): The hidden layer sizes of the average policy network.
            reservoir_buffer_capacity (int): The size of the buffer for the average policy.
            anticipatory_param (float): The hyper-parameter that balances rl/average policy.
            batch_size (int): The batch size for training the average policy.
            train_every (int): Train the SL policy every X steps.
            rl_learning_rate (float): The learning rate of the RL agent.
            sl_learning_rate (float): The learning rate of the average policy.
            min_buffer_size_to_learn (int): The minimum buffer size to learn for the average policy.
            q_replay_memory_size (int): The memory size of the inner DQN agent.
            q_replay_memory_init_size (int): The initial memory size of the inner DQN agent.
            q_update_target_estimator_every (int): The frequency of updating the target network of the inner DQN agent.
            q_discount_factor (float): The discount factor of the inner DQN agent.
            q_epsilon_start (float): The starting epsilon of the inner DQN agent.
            q_epsilon_end (float): The ending epsilon of the inner DQN agent.
            q_epsilon_decay_steps (int): The decay steps of the inner DQN agent.
            q_batch_size (int): The batch size of the inner DQN agent.
            q_train_every (int): Train the inner DQN agent every X steps.
            q_mlp_layers (list): The layer sizes of the inner DQN agent.
            evaluate_with (string): Either 'best_response' or 'average_policy'.
        '''
        self.use_raw = False
        self._sess = sess
        self._scope = scope
        self._action_num = action_num
        self._state_shape = state_shape
        self._layer_sizes = hidden_layers_sizes
        self._batch_size = batch_size
        self._train_every = train_every
        self._sl_learning_rate = sl_learning_rate
        self._anticipatory_param = anticipatory_param
        self._min_buffer_size_to_learn = min_buffer_size_to_learn

        self._reservoir_buffer = ReservoirBuffer(reservoir_buffer_capacity)
        self._prev_timestep = None
        self._prev_action = None
        self.evaluate_with = evaluate_with

        # Lookup tables for converting the one-hot card encoding in the
        # observation into rank/suit characters and numeric ranks used by the
        # MCTS poker state.
        self.d = {0: 'A', 1: '2', 2: '3', 3: '4', 4: '5', 5: '6', 6: '7',
                  7: '8', 8: '9', 9: 'T', 10: 'J', 11: 'Q', 12: 'K'}
        self.s = {0: 's', 1: 'h', 2: 'd', 3: 'c'}
        self.c2n = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
                    '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14}
        # Pre-flop hand range used by the (commented-out) hole-card filter below.
        self.late_range = Range(
            '22+, A2s+, K2s+, Q2s+, J2s+, J8, T9, 98, 87, 76s, 65s, 54s, 98s+, K9+, Q8+, J7+, T6s+, A9+')

        # Total timesteps
        self.total_t = 0

        # Step counter to keep track of learning.
        self._step_counter = 0

        with tf.variable_scope(scope):
            # Inner RL agent
            self._rl_agent = DQNAgent(
                sess, scope + '_dqn', q_replay_memory_size,
                q_replay_memory_init_size, q_update_target_estimator_every,
                q_discount_factor, q_epsilon_start, q_epsilon_end,
                q_epsilon_decay_steps, q_batch_size, action_num, state_shape,
                q_train_every, q_mlp_layers, rl_learning_rate)

            with tf.variable_scope('sl'):
                # Build supervised model
                self._build_model()

        self.sample_episode_policy()

    def _build_model(self):
        ''' Build the model for supervised learning. '''
        # Placeholders.
        input_shape = [None]
        input_shape.extend(self._state_shape)
        self._info_state_ph = tf.placeholder(shape=input_shape, dtype=tf.float32)
        self._X = tf.contrib.layers.flatten(self._info_state_ph)

        # Boolean to indicate whether the network is in training mode
        self.is_train = tf.placeholder(tf.bool, name="is_train")

        # Batch Normalization
        self._X = tf.layers.batch_normalization(self._X, training=True)

        self._action_probs_ph = tf.placeholder(
            shape=[None, self._action_num], dtype=tf.float32)

        # Average policy network.
        fc = self._X
        for dim in self._layer_sizes:
            fc = tf.contrib.layers.fully_connected(fc, dim, activation_fn=tf.tanh)
        self._avg_policy = tf.contrib.layers.fully_connected(
            fc, self._action_num, activation_fn=None)
        self._avg_policy_probs = tf.nn.softmax(self._avg_policy)

        # Loss
        self._loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=tf.stop_gradient(self._action_probs_ph),
                logits=self._avg_policy))

        optimizer = tf.train.AdamOptimizer(
            learning_rate=self._sl_learning_rate, name='nfsp_adam')

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                       scope=tf.get_variable_scope().name)
        with tf.control_dependencies(update_ops):
            self._learn_step = optimizer.minimize(self._loss)

    def feed(self, ts):
        ''' Feed data to the inner RL agent.

        Args:
            ts (list): A list of 5 elements that represent the transition.
        '''
        self._rl_agent.feed(ts)
        self.total_t += 1
        if self.total_t > 0 and len(self._reservoir_buffer) >= self._min_buffer_size_to_learn \
                and self.total_t % self._train_every == 0:
            sl_loss = self.train_sl()
            print('\rINFO - Agent {}, step {}, sl-loss: {}'.format(
                self._scope, self.total_t, sl_loss), end='')

    def step(self, state):
        ''' Returns the action to be taken.

        Args:
            state (dict): The current state

        Returns:
            action (int): An action id
        '''
        self.sample_episode_policy()

        # Decode the hole cards from the one-hot portion of the observation
        # (first 52 entries).
        cards = ''
        pos = 0
        for i in state['obs']:
            if (i == 1 and pos < 52):
                cards = cards + self.d[pos % 13] + '' + self.s[pos // 13]
            pos += 1
        # if(len(cards) == 4 and not Combo(cards) in self.late_range.combos):
        #     return 0, 1

        # Convert hole and public cards into (rank, suit) tuples.
        tab = []
        handcards = cards
        for i in state['public_cards']:
            tab.append((self.c2n[i[1]], i[0].lower()))
        hand = []
        for i in range(0, len(handcards), 2):
            hand.append((self.c2n[handcards[i]], handcards[i + 1]))
        # print(tab)
        hand = [x for x in hand if x not in tab]

        # Build the MCTS search state from the decoded cards and the chip
        # information packed at the end of the observation.
        stt = mcst.PokerState(hand, tab, state['cur'], state['opp'],
                              abs(state['obs'][-2] - state['obs'][-1]),
                              state['obs'][-2] + state['obs'][-1],
                              state['obs'][52], state['obs'][53])
        # print(hand, tab, 250 - min(state['obs'][-2:]), 250 - max(state['obs'][-2:]),
        #       abs(state['obs'][-2] - state['obs'][-1]),
        #       state['obs'][-2] + state['obs'][-1],
        #       min(state['obs'][-2:]), max(state['obs'][-2:]))
        # mcst.PokerState()

        obs = state['obs']
        legal_actions = state['legal_actions']
        par = mcst.MCTS(1)
        if self._mode == MODE.best_response:
            probs = self._rl_agent.predict(obs)
            # Bias the DQN distribution toward the MCTS recommendation.
            m = par.UCT(rootstate=stt, itermax=50000, processes=16, verbose=False)
            m = m[0]
            probs[m] += 1
            probs = remove_illegal(probs, legal_actions)
            probs /= sum(probs)
        elif self._mode == MODE.average_policy:
            probs = self._act(obs)
            one_hot = np.eye(len(probs))[np.argmax(probs)]
            self._add_transition(obs, one_hot)
            probs = remove_illegal(probs, legal_actions)

        action = np.random.choice(len(probs), p=probs)
        # print(m, action)

        return action

    def eval_step(self, state):
        ''' Use the average policy for evaluation.

        Args:
            state (dict): The current state.

        Returns:
            action (int): An action id.
            probs (list): The list of action probabilities.
        '''
        # Decode the hole cards from the one-hot portion of the observation.
        cards = ''
        pos = 0
        for i in state['obs']:
            if (i == 1 and pos < 52):
                cards = cards + self.d[pos % 13] + '' + self.s[pos // 13]
            pos += 1
        # if(len(cards) == 4 and not Combo(cards) in self.late_range.combos):
        #     return 0, 1

        tab = []
        handcards = cards
        legal_actions = state['legal_actions']
        for i in state['public_cards']:
            tab.append((self.c2n[i[1]], i[0].lower()))
        hand = []
        for i in range(0, len(handcards), 2):
            hand.append((self.c2n[handcards[i]], handcards[i + 1]))
        hand = [x for x in hand if x not in tab]

        stt = mcst.PokerState(hand, tab, state['cur'], state['opp'],
                              abs(state['obs'][-2] - state['obs'][-1]),
                              state['obs'][-2] + state['obs'][-1],
                              state['obs'][52], state['obs'][53])
        par = mcst.MCTS(1)
        # print(state)
        if self.evaluate_with == 'best_response':
            action, probs = self._rl_agent.eval_step(state)
            # Bias the DQN distribution toward the MCTS recommendation.
            m = par.UCT(rootstate=stt, itermax=100000, processes=32, verbose=False)
            print(m, probs)
            m = m[0]
            probs[m] += 1
            # if probs[1] == probs[3] and probs[3] == probs[4] and probs[4] == probs[5]:
            #     probs[2] /= 25
            #     probs[m] += 2
            # elif not m == 5:
            #     probs[m] += 2
            # else:
            #     probs[4] += 3
            # if(len(tab) == 0):
            #     probs[5] = 0
            # else:
            #     probs[5] /= 4
            probs = remove_illegal(probs, legal_actions)
            probs /= sum(probs)
        elif self.evaluate_with == 'average_policy':
            obs = state['obs']
            probs = self._act(obs)
        else:
            raise ValueError(
                "'evaluate_with' should be either 'average_policy' or 'best_response'.")

        probs = remove_illegal(probs, legal_actions)
        action = np.random.choice(len(probs), p=probs)
        if (action == 0 and 1 in legal_actions):
            action = 1
        # print(action, probs)

        return action, probs

    def sample_episode_policy(self):
        ''' Sample the average/best_response policy for the episode. '''
        if np.random.rand() < self._anticipatory_param:
            self._mode = MODE.best_response
        else:
            self._mode = MODE.average_policy

    def _act(self, info_state):
        ''' Predict action probabilities given the observation and legal actions.

        Args:
            info_state (numpy.array): An observation.

        Returns:
            action_probs (numpy.array): The predicted action probabilities.
        '''
        info_state = np.expand_dims(info_state, axis=0)
        action_probs = self._sess.run(
            self._avg_policy_probs,
            feed_dict={
                self._info_state_ph: info_state,
                self.is_train: False
            })[0]
        return action_probs

    def _add_transition(self, state, probs):
        ''' Add a new transition to the reservoir buffer.

        Transitions are in the form (state, probs).

        Args:
            state (numpy.array): The state.
            probs (numpy.array): The probabilities of each action.
        '''
        transition = Transition(info_state=state, action_probs=probs)
        self._reservoir_buffer.add(transition)

    def train_sl(self):
        ''' Compute the loss on sampled transitions and perform an avg-network update.

        If there are not enough elements in the buffer, no loss is computed and
        `None` is returned instead.

        Returns:
            loss (float): The average loss obtained on this batch of transitions or `None`.
        '''
        if (len(self._reservoir_buffer) < self._batch_size or
                len(self._reservoir_buffer) < self._min_buffer_size_to_learn):
            return None

        transitions = self._reservoir_buffer.sample(self._batch_size)
        info_states = [t.info_state for t in transitions]
        action_probs = [t.action_probs for t in transitions]

        loss, _ = self._sess.run(
            [self._loss, self._learn_step],
            feed_dict={
                self._info_state_ph: info_states,
                self._action_probs_ph: action_probs,
                self.is_train: True,
            })
        return loss
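# Illustrative only: the best-response branches above bias the network's action
# distribution toward the MCTS recommendation by adding 1 to that action's score
# before masking illegal actions and renormalizing. The helper below is a
# hypothetical stand-alone version of that mixing step, not part of the agent.
def _example_mix_mcts(probs, mcts_action, legal_actions):
    ''' Worked example: probs=[0.2, 0.5, 0.3], mcts_action=0, legal_actions=[0, 1, 2]
    gives boosted scores [1.2, 0.5, 0.3] and normalized probabilities
    [0.6, 0.25, 0.15], so the MCTS choice receives most of the mass while the
    network's preferences still decide among the remaining actions.
    '''
    probs = np.array(probs, dtype=float)
    probs[mcts_action] += 1
    probs = remove_illegal(probs, legal_actions)
    return probs / probs.sum()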
class NFSPAgent(object):
    ''' NFSP Agent implementation in TensorFlow. '''

    def __init__(self,
                 sess,
                 scope,
                 action_num=4,
                 state_shape=None,
                 hidden_layers_sizes=None,
                 reservoir_buffer_capacity=int(1e6),
                 anticipatory_param=0.5,
                 batch_size=256,
                 rl_learning_rate=0.0001,
                 sl_learning_rate=0.00001,
                 min_buffer_size_to_learn=1000,
                 q_replay_memory_size=30000,
                 q_replay_memory_init_size=1000,
                 q_update_target_estimator_every=1000,
                 q_discount_factor=0.99,
                 q_epsilon_start=1,
                 q_epsilon_end=0.1,
                 q_epsilon_decay_steps=int(1e6),
                 q_batch_size=256,
                 q_norm_step=1000,
                 q_mlp_layers=None):
        ''' Initialize the NFSP agent.

        Args:
            sess (tf.Session): Tensorflow session object.
            scope (string): The name scope of NFSPAgent.
            action_num (int): The number of actions.
            state_shape (list): The shape of the state space.
            hidden_layers_sizes (list): The hidden layer sizes of the average policy network.
            reservoir_buffer_capacity (int): The size of the buffer for the average policy.
            anticipatory_param (float): The hyper-parameter that balances rl/average policy.
            batch_size (int): The batch size for training the average policy.
            rl_learning_rate (float): The learning rate of the RL agent.
            sl_learning_rate (float): The learning rate of the average policy.
            min_buffer_size_to_learn (int): The minimum buffer size to learn for the average policy.
            q_replay_memory_size (int): The memory size of the inner DQN agent.
            q_replay_memory_init_size (int): The initial memory size of the inner DQN agent.
            q_update_target_estimator_every (int): The frequency of updating the target network of the inner DQN agent.
            q_discount_factor (float): The discount factor of the inner DQN agent.
            q_epsilon_start (float): The starting epsilon of the inner DQN agent.
            q_epsilon_end (float): The ending epsilon of the inner DQN agent.
            q_epsilon_decay_steps (int): The decay steps of the inner DQN agent.
            q_batch_size (int): The batch size of the inner DQN agent.
            q_norm_step (int): The normalization steps of the inner DQN agent.
            q_mlp_layers (list): The layer sizes of the inner DQN agent.
        '''
        self._sess = sess
        self._action_num = action_num
        self._state_shape = state_shape
        self._layer_sizes = hidden_layers_sizes + [action_num]
        self._batch_size = batch_size
        self._sl_learning_rate = sl_learning_rate
        self._anticipatory_param = anticipatory_param
        self._min_buffer_size_to_learn = min_buffer_size_to_learn

        self._reservoir_buffer = ReservoirBuffer(reservoir_buffer_capacity)
        self._prev_timestep = None
        self._prev_action = None

        # Step counter to keep track of learning.
        self._step_counter = 0

        with tf.variable_scope(scope):
            # Inner RL agent
            self._rl_agent = DQNAgent(
                sess, 'dqn', q_replay_memory_size, q_replay_memory_init_size,
                q_update_target_estimator_every, q_discount_factor,
                q_epsilon_start, q_epsilon_end, q_epsilon_decay_steps,
                q_batch_size, action_num, state_shape, q_norm_step,
                q_mlp_layers, rl_learning_rate)

            # Build supervised model
            self._build_model()

        self.sample_episode_policy()

    def _build_model(self):
        ''' Build the model for supervised learning. '''
        # Placeholders.
        input_shape = [None]
        input_shape.extend(self._state_shape)
        self._info_state_ph = tf.placeholder(shape=input_shape, dtype=tf.float32)
        self._X = tf.contrib.layers.flatten(self._info_state_ph)
        self._action_probs_ph = tf.placeholder(
            shape=[None, self._action_num], dtype=tf.float32)

        # Average policy network.
        self._avg_network = snt.nets.MLP(output_sizes=self._layer_sizes)
        self._avg_policy = self._avg_network(self._X)
        self._avg_policy_probs = tf.nn.softmax(self._avg_policy)

        # Loss
        self._loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=tf.stop_gradient(self._action_probs_ph),
                logits=self._avg_policy))

        optimizer = tf.train.AdamOptimizer(
            learning_rate=self._sl_learning_rate, name='nfsp_adam')
        self._learn_step = optimizer.minimize(self._loss)

    def feed(self, ts):
        ''' Feed data to the inner RL agent.

        Args:
            ts (list): A list of 5 elements that represent the transition.
        '''
        self._rl_agent.feed(ts)

    def step(self, state):
        ''' Returns the action to be taken.

        Args:
            state (dict): The current state

        Returns:
            action (int): An action id
        '''
        obs = state['obs']
        legal_actions = state['legal_actions']
        if self._mode == MODE.best_response:
            probs = self._rl_agent.predict(obs)
            self._add_transition(obs, probs)
        elif self._mode == MODE.average_policy:
            probs = self._act(obs)

        probs = remove_illegal(probs, legal_actions)
        action = np.random.choice(len(probs), p=probs)

        return action

    def eval_step(self, state):
        ''' Use the inner RL agent for evaluation.

        Args:
            state (dict): The current state.

        Returns:
            action (int): An action id.
        '''
        action = self._rl_agent.eval_step(state)
        return action

    def sample_episode_policy(self):
        ''' Sample the average/best_response policy for the episode. '''
        if np.random.rand() < self._anticipatory_param:
            self._mode = MODE.best_response
        else:
            self._mode = MODE.average_policy

    def _act(self, info_state):
        ''' Predict action probabilities given the observation and legal actions.

        Args:
            info_state (numpy.array): An observation.

        Returns:
            action_probs (numpy.array): The predicted action probabilities.
        '''
        info_state = np.expand_dims(info_state, axis=0)
        action_probs = self._sess.run(
            self._avg_policy_probs,
            feed_dict={self._info_state_ph: info_state})[0]
        return action_probs

    def _add_transition(self, state, probs):
        ''' Add a new transition to the reservoir buffer.

        Transitions are in the form (state, probs).

        Args:
            state (numpy.array): The state.
            probs (numpy.array): The probabilities of each action.
        '''
        # print(len(self._reservoir_buffer))
        transition = Transition(info_state=state, action_probs=probs)
        self._reservoir_buffer.add(transition)

    def train_rl(self):
        ''' Update the inner RL agent. '''
        return self._rl_agent.train()

    def train_sl(self):
        ''' Compute the loss on sampled transitions and perform an avg-network update.

        If there are not enough elements in the buffer, no loss is computed and
        `None` is returned instead.

        Returns:
            loss (float): The average loss obtained on this batch of transitions or `None`.
        '''
        if (len(self._reservoir_buffer) < self._batch_size or
                len(self._reservoir_buffer) < self._min_buffer_size_to_learn):
            return None

        transitions = self._reservoir_buffer.sample(self._batch_size)
        info_states = [t.info_state for t in transitions]
        action_probs = [t.action_probs for t in transitions]

        loss, _ = self._sess.run(
            [self._loss, self._learn_step],
            feed_dict={
                self._info_state_ph: info_states,
                self._action_probs_ph: action_probs,
            })
        return loss
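# Illustrative only: the ReservoirBuffer used by all three agents is defined
# elsewhere in the codebase. A minimal stand-in consistent with the calls made
# here (add, sample, __len__) would implement standard reservoir sampling, so the
# buffer keeps a uniform sample of every transition ever added once it is full.
class _ExampleReservoirBuffer(object):
    ''' Hypothetical sketch of a reservoir buffer; not the project's implementation. '''

    def __init__(self, capacity):
        self._capacity = capacity
        self._data = []
        self._add_calls = 0

    def add(self, element):
        if len(self._data) < self._capacity:
            self._data.append(element)
        else:
            # Algorithm R: replace a random slot so every element seen so far
            # has an equal chance of remaining in the buffer.
            idx = np.random.randint(0, self._add_calls + 1)
            if idx < self._capacity:
                self._data[idx] = element
        self._add_calls += 1

    def sample(self, num_samples):
        if len(self._data) < num_samples:
            raise ValueError('Cannot sample {} elements from a buffer of size {}'.format(
                num_samples, len(self._data)))
        indices = np.random.choice(len(self._data), num_samples, replace=False)
        return [self._data[i] for i in indices]

    def __len__(self):
        return len(self._data)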