from tensorflow.keras import backend as K  # assumed backend import


def AddMax(inputs):
    # Append each row's max, min, and their (float-cast) argmax/argmin positions
    # as four extra features along axis 1.
    xmax = K.max(inputs, axis=1, keepdims=True)
    xmin = K.min(inputs, axis=1, keepdims=True)
    xlocmax = K.expand_dims(K.cast_to_floatx(K.argmax(inputs, axis=1)))
    xlocmin = K.expand_dims(K.cast_to_floatx(K.argmin(inputs, axis=1)))
    output = K.concatenate([inputs, xmax, xmin, xlocmax, xlocmin], axis=1)
    return output
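A quick eager-mode check of AddMax on a toy row (assumes the TensorFlow Keras backend import above):

import tensorflow as tf

x = tf.constant([[3., 1., 2.]])
print(AddMax(x).numpy())
# -> [[3. 1. 2. 3. 1. 0. 1.]]: the row followed by its max, min, argmax and argmin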
 def _process_channel(args):
     # Nested helper (uses `k`, `width`, `_hp_offset`, `_x1`, `_x2`, `_y1`, `_y2`
     # and `_wh` from the enclosing decode function): for one keypoint channel,
     # replace each regressed keypoint with its nearest heatmap peak when that
     # peak is confident, lies inside the box, and is close enough; otherwise
     # keep the regressed location.
     __kps, __hm_hp = args
     thresh = 0.1
     __hm_scores, __hm_inds = tf.math.top_k(__hm_hp, k=k, sorted=True)
     __hm_xs = K.cast(__hm_inds % width, 'float32')
     __hm_ys = K.cast(K.cast(__hm_inds / width, 'int32'), 'float32')
     __hp_offset = K.gather(_hp_offset, __hm_inds)
     __hm_xs = __hm_xs + __hp_offset[..., 0]
     __hm_ys = __hm_ys + __hp_offset[..., 1]
     mask = K.cast(__hm_scores > thresh, 'float32')
     __hm_scores = (1. - mask) * -1. + mask * __hm_scores
     __hm_xs = (1. - mask) * -10000. + mask * __hm_xs
     __hm_ys = (1. - mask) * -10000. + mask * __hm_ys
     __hm_kps = K.stack([__hm_xs, __hm_ys], -1)  # k x 2
     __broadcast_hm_kps = K.expand_dims(__hm_kps, 1)  # k x 1 x 2
     __broadcast_kps = K.expand_dims(__kps, 0)  # 1 x k x 2
     dist = K.sqrt(
         K.sum(K.pow(__broadcast_kps - __broadcast_hm_kps, 2),
               2))  # k, k
     min_dist = K.min(dist, 0)
     min_ind = K.argmin(dist, 0)
     __hm_scores = K.gather(__hm_scores, min_ind)
     __hm_kps = K.gather(__hm_kps, min_ind)
     mask = (K.cast(__hm_kps[..., 0] < _x1, 'float32') +
             K.cast(__hm_kps[..., 0] > _x2, 'float32') +
             K.cast(__hm_kps[..., 1] < _y1, 'float32') +
             K.cast(__hm_kps[..., 1] > _y2, 'float32') +
             K.cast(__hm_scores < thresh, 'float32') + K.cast(
                 min_dist > 0.3 *
                 (K.maximum(_wh[..., 0], _wh[..., 1])), 'float32'))
     mask = K.expand_dims(mask, -1)
     mask = K.cast(mask > 0, 'float32')
     __kps = (1. - mask) * __hm_kps + mask * __kps
     return __kps
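The matching step above builds a broadcast k x k distance matrix; a minimal standalone sketch of that pattern with toy coordinates (not the full decode pipeline):

import tensorflow as tf
from tensorflow.keras import backend as K

kps = tf.constant([[10., 10.], [50., 40.]])      # regressed keypoints, k x 2
hm_kps = tf.constant([[48., 39.], [11., 12.]])   # heatmap keypoints,  k x 2
# (1 x k x 2) minus (k x 1 x 2) broadcasts to a k x k pairwise distance matrix
dist = K.sqrt(K.sum(K.square(K.expand_dims(kps, 0) - K.expand_dims(hm_kps, 1)), 2))
print(K.argmin(dist, 0).numpy())  # -> [1 0], the nearest heatmap peak per regressed keypoint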
Example #3
 def step(self, input_energy_t, states, return_logZ=True):
     # Note: in the following, `prev_target_val` has shape (B, F),
     # where B = batch_size and F = output feature dim.
     # `i` is float32 due to the behavior of `K.rnn`.
     prev_target_val, i, chain_energy = states[:3]
     t = K.cast(i[0, 0], dtype='int32')
     if len(states) > 3:
         if K.backend() == 'theano':
             m = states[3][:, t:(t + 2)]
         else:
             m = K.slice(states[3], [0, t], [-1, 2])
         input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
         # (1, F, F)*(B, 1, 1) -> (B, F, F)
         chain_energy = chain_energy * K.expand_dims(
             K.expand_dims(m[:, 0] * m[:, 1]))
     if return_logZ:
          # shapes: (1, F, F) + (B, F, 1) -> (B, F, F)
         energy = chain_energy + K.expand_dims(
             input_energy_t - prev_target_val, 2)
         new_target_val = K.logsumexp(-energy, 1)  # shapes: (B, F)
         return new_target_val, [new_target_val, i + 1]
     else:
         energy = chain_energy + K.expand_dims(
             input_energy_t + prev_target_val, 2)
         min_energy = K.min(energy, 1)
          # cast for the tf version of `K.rnn`
         argmin_table = K.cast(K.argmin(energy, 1), K.floatx())
         return argmin_table, [min_energy, i + 1]
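For context, the `return_logZ` branch is one step of the forward (log-partition) recursion; a minimal standalone sketch of that recursion on a toy chain (assumed unary/transition scores, not the author's CRF class; the snippet above applies logsumexp to `-energy` because its energies are costs to be minimized):

import tensorflow as tf

def forward_logZ(unary, trans):
    # unary: (T, F) per-step scores, trans: (F, F) transition scores
    alpha = unary[0]
    for t in range(1, unary.shape[0]):
        # alpha_t(y) = logsumexp_y'(alpha_{t-1}(y') + trans(y', y)) + unary_t(y)
        alpha = tf.reduce_logsumexp(alpha[:, None] + trans, axis=0) + unary[t]
    return tf.reduce_logsumexp(alpha)

print(forward_logZ(tf.random.normal((4, 3)), tf.random.normal((3, 3))).numpy())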
Example #4
 def update_state(self, y_true, y_pred, sample_weight=None):
     row = tf.range(0, tf.shape(y_true)[0], dtype=tf.int64)
     col = k.argmin(y_pred, axis=1)
     pred_val = tf.gather_nd(y_true, tf.stack([row, col], axis=1))
     # Gap between the true value at the predicted-argmin column and the row's true minimum
     values = pred_val - k.min(y_true, axis=1)
     # tf.print(tf.gather(y_true,0), tf.gather(y_pred,0), tf.gather(pred_val,0),k.min(y_true, axis=1), values, sep='\n')
     return super(DBM, self).update_state(values,
                                          sample_weight=sample_weight)
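On assumed toy tensors, `values` is the gap between the true value at the predicted-argmin column and the row's true minimum:

import tensorflow as tf
from tensorflow.keras import backend as k

y_true = tf.constant([[3., 1., 2.]])
y_pred = tf.constant([[0.2, 0.5, 0.1]])   # argmin picks column 2
row = tf.range(0, tf.shape(y_true)[0], dtype=tf.int64)
col = k.argmin(y_pred, axis=1)
pred_val = tf.gather_nd(y_true, tf.stack([row, col], axis=1))
print((pred_val - k.min(y_true, axis=1)).numpy())  # -> [1.], picked 2. while the best was 1.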
Example #5
def payoff_Eq_MSE(game, pureStrategies_perPlayer, nashEq_true, nashEq_pred,
                  computePayoff_function, num_players):
    """
    Function to compute the mean squared error of equilibria and the mean squared error of the payoffs resulting
    from the associated equilibria.
    This is not a loss function. It is used by other loss functions to compute their final loss values.
    """

    # Compute error
    error = nashEq_true - nashEq_pred

    # Compute a weight for the final result to compensate for the replacement of NaNs with zeros and its
    # effect on the averaging
    nan_count = tf.reduce_sum(
        tf.cast(tf.math.is_nan(nashEq_true[0][0]), tf.int32))
    eq_n_elements = tf.size(nashEq_true[0][0])
    compensation_factor = tf.cast(eq_n_elements / (eq_n_elements - nan_count),
                                  tf.float32)

    # Replace nan values with 0
    error = tf.where(tf.math.is_nan(error), tf.zeros_like(error), error)

    # Computing the minimum of the mean squared error (MSE) over the candidate Nash equilibria
    MSE_eq = K.mean(K.square(error), axis=[2, 3])
    min_index = K.argmin(MSE_eq, axis=1)
    loss_Eq_MSE = tf.gather_nd(
        MSE_eq,
        tf.stack(
            (tf.range(0,
                      tf.shape(min_index)[0]), tf.cast(min_index,
                                                       dtype='int32')),
            axis=1))
    loss_Eq_MSE *= compensation_factor

    # Computing the payoffs given the selected output for each sample in the batch
    selected_trueNash = tf.gather_nd(
        nashEq_true,
        tf.stack(
            (tf.range(0,
                      tf.shape(min_index)[0]), tf.cast(min_index,
                                                       dtype='int32')),
            axis=1))
    payoff_true = computePayoff_function['computePayoff'](
        game, selected_trueNash, pureStrategies_perPlayer, num_players)
    payoff_pred = computePayoff_function['computePayoff'](
        game, tf.gather(nashEq_pred, 0,
                        axis=1), pureStrategies_perPlayer, num_players)

    # Computing the mean squared error (MSE) of payoffs
    loss_payoff_MSE = K.mean(K.square(payoff_true - payoff_pred), axis=1)

    return loss_Eq_MSE, loss_payoff_MSE
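The argmin/gather_nd selection above simply picks, per batch sample, the candidate equilibrium closest to the prediction; a stripped-down illustration on assumed toy MSE values:

import tensorflow as tf
from tensorflow.keras import backend as K

MSE_eq = tf.constant([[0.4, 0.1, 0.7],    # (batch, candidate equilibria)
                      [0.3, 0.9, 0.2]])
min_index = K.argmin(MSE_eq, axis=1)
batch_idx = tf.range(0, tf.shape(min_index)[0])
picked = tf.gather_nd(MSE_eq, tf.stack((batch_idx, tf.cast(min_index, 'int32')), axis=1))
print(picked.numpy())  # -> [0.1 0.2], one best-matching candidate per sample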
Example #6
def chamfer_loss(ref, targ, return_argmin=False):
    # Symmetric Chamfer distance between two point clouds of shape (nr, 3) and (nt, 3).
    nr = ref.shape[0]
    nt = targ.shape[0]
    r = tf.tile(ref, [nt, 1])
    r = tf.reshape(r, [nt, nr, 3])

    t = tf.tile(targ, [1, nr])
    t = tf.reshape(t, [nt, nr, 3])

    dist = K.sum(K.square(r - t), axis=2)

    if return_argmin:
        closest = K.argmin(dist, axis=0)
        loss = (K.mean(K.min(dist, axis=1)) + K.mean(K.min(dist, axis=0))) / 2
        return loss, closest
    else:
        return (K.mean(K.min(dist, axis=1)) + K.mean(K.min(dist, axis=0))) / 2
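A toy eager-mode check of chamfer_loss on assumed small point clouds (the snippet above relies on `tf` and the Keras backend `K` being imported in its module):

import tensorflow as tf

ref = tf.constant([[0., 0., 0.], [1., 0., 0.]])                  # 2 reference points
targ = tf.constant([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.]])   # 3 target points
loss, closest = chamfer_loss(ref, targ, return_argmin=True)
print(loss.numpy())     # ~0.1667, the symmetric mean of nearest squared distances
print(closest.numpy())  # [0 1], the nearest target point for each reference point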
Example #7
    def call(self, inputs):
        inputs = K.expand_dims(inputs, -1)
        distance = (inputs - K.expand_dims(self.evaluate_table, 0))**2

        idx = K.argmin(distance, axis=-2)

        takecare = tf.nn.embedding_lookup(self.takecare_table, idx)

        idx_clip = tf.clip_by_value(idx, self.idx_min, self.idx_max)
        idx_table = idx_clip + K.expand_dims(self.idx_table, 0)

        evaluate = tf.nn.embedding_lookup(self.evaluate_table, idx_table)
        vector = tf.nn.embedding_lookup(self.vector_table, idx_table)

        score = (inputs - evaluate)**2
        score = score * -1.0 * takecare
        score = tf.nn.softmax(score, axis=-2)

        outputs = vector * score
        outputs = tf.math.reduce_sum(outputs, axis=-2)
        return outputs
Example #8
def triplet_hard_loss(y_true, y_pred, margin=0.3):
    label_shape = K.int_shape(y_true)
    print('label shape: ', label_shape)
    batch_size = label_shape[0]
    print('batch size: ', batch_size)
    print('y_true dim number', K.ndim(y_true))
    assert K.ndim(y_true) == 2, 'triplet hard loss labels should be a 2-D (batch_size, 1) tensor'
    #anchor = K.l2_normalize(y_pred, axis=1)
    anchor = y_pred
    similarity_matrix = K.dot(anchor, K.transpose(anchor))
    similarity_matrix = K.clip(similarity_matrix, 0.0, 1.0)
    row2mat = K.repeat_elements(y_true, rep=batch_size, axis=1)
    col2mat = K.repeat_elements(K.transpose(y_true), rep=batch_size, axis=0)
    gt_matrix = K.cast(K.equal(row2mat, col2mat), 'float')
    positive_ind = K.argmin(similarity_matrix + 99. * (1 - gt_matrix), axis=1)
    negative_ind = K.argmax(similarity_matrix - 99 * gt_matrix, axis=1)
    positive = K.gather(anchor, positive_ind)
    negative = K.gather(anchor, negative_ind)
    pos_anchor_dist = K.sum(K.square(positive-anchor), axis=1)
    neg_anchor_dist = K.sum(K.square(negative-anchor), axis=1)
    basic_loss = pos_anchor_dist - neg_anchor_dist + margin
    loss = K.maximum(basic_loss, 0.0)
    return loss
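A toy eager-mode sanity check with assumed embeddings and (batch_size, 1) labels:

import tensorflow as tf

embeddings = tf.constant([[1., 0.], [0.9, 0.1], [0., 1.]])
labels = tf.constant([[0.], [0.], [1.]])
print(triplet_hard_loss(labels, embeddings).numpy())  # per-sample hinge losses, [0. 0. 0.] here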
Example #9
def delta_equilibrium(game, pureStrategies_perPlayer, nashEq_true, nashEq_pred,
                      computePayoff_function, num_players):
    """
    Function to compute delta (the regret of not playing the equilibrium strategy while the other players do).
    """

    # Compute error
    error = nashEq_true - nashEq_pred

    # Replace nan values with 0
    error = tf.where(tf.math.is_nan(error), tf.zeros_like(error), error)

    # Computing the index of the minimum mean squared error (MSE) over the candidate Nash equilibria
    MSE_eq = K.mean(K.square(error), axis=[2, 3])
    min_index = K.argmin(MSE_eq, axis=1)

    # Find the matching true output for each sample in the batch
    selected_trueNash = tf.gather_nd(
        nashEq_true,
        tf.stack(
            (tf.range(0,
                      tf.shape(min_index)[0]), tf.cast(min_index,
                                                       dtype='int32')),
            axis=1))

    # Compute payoff of true equilibria (shape: [batch, players])
    payoff_true = computePayoff_function['computePayoff'](
        game, selected_trueNash, pureStrategies_perPlayer, num_players)

    # Compute the difference in payoff for each player when their strategy in the true equilibrium is replaced by
    # the predicted strategy (shape: a list of length num_players, each element of shape [batch])
    delta_per_player = []
    outperform_eq_per_player = []
    for player in range(num_players):
        # Replace the prediction for a player on the true equilibrium
        unstacked_list = tf.unstack(selected_trueNash, axis=1, num=num_players)
        unstacked_list[player] = nashEq_pred[:, 0, player]
        approx_on_true = tf.stack(unstacked_list, axis=1)

        # Compute payoff for the modified equilibrium
        approx_payoff_current_player = computePayoff_function['computePayoff'](
            game, approx_on_true, pureStrategies_perPlayer, num_players)

        # Compute delta and possible payoff improvement
        delta_per_player.append(
            tf.maximum(
                payoff_true[:, player] -
                approx_payoff_current_player[:, player], 0))
        outperform_eq_per_player.append(
            tf.math.greater(approx_payoff_current_player[:, player],
                            payoff_true[:, player]))

    # Find the maximum delta for all players
    delta_stacked = tf.stack(delta_per_player, axis=1)
    delta = tf.reduce_max(delta_stacked, axis=1)
    # delta = tf.reduce_max(delta_stacked)

    # Also count the samples where the predicted equilibrium gives every player a better payoff than the true
    # (classical-method) equilibrium (a scalar)
    outperform_stacked = tf.stack(outperform_eq_per_player, axis=1)
    outperform_no = tf.math.count_nonzero(
        tf.reduce_all(outperform_stacked, axis=1))

    return delta, outperform_no
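The per-player loop relies on an unstack/replace/stack pattern to swap one player's predicted strategy into the true profile; a stripped-down illustration with assumed shapes (1 sample, 2 players, 2 pure strategies):

import tensorflow as tf

true_profile = tf.constant([[[1., 0.], [0., 1.]]])   # (batch, players, strategies)
pred_profile = tf.constant([[[0.6, 0.4], [0.2, 0.8]]])
player = 0
unstacked = tf.unstack(true_profile, axis=1, num=2)
unstacked[player] = pred_profile[:, player]
mixed = tf.stack(unstacked, axis=1)
print(mixed.numpy())  # player 0 plays the prediction, player 1 keeps the true strategy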
Example #10
def hydra_delta_equilibrium(nashEq_predicted, nashEq_true, game,
                            pureStrategies_perPlayer, computePayoff_function,
                            num_players):
    """
    Function to compute delta (the regret of not playing the equilibrium strategy while the other players do) in a hydra network.
    """

    # Create a row-wise meshgrid of predicted equilibria for each sample in the batch by adding a new dimension and
    # replicating the array along it
    predicted_grid = tf.tile(tf.expand_dims(nashEq_predicted, axis=2),
                             [1, 1, tf.shape(nashEq_predicted)[1], 1, 1])

    # Create a column-wise meshgrid of true equilibria for each sample in the batch by adding a new dimension and
    # replicating the array along it
    true_grid = tf.tile(tf.expand_dims(nashEq_true, axis=1),
                        [1, tf.shape(nashEq_true)[1], 1, 1, 1])

    # Compute error grid
    error_grid = predicted_grid - true_grid

    # Replace nan values with 0
    error_grid = tf.where(tf.math.is_nan(error_grid),
                          tf.zeros_like(error_grid), error_grid)

    # Computing the index of the minimum mean squared error (MSE) over the candidate Nash equilibria
    MSE_eq = K.mean(K.square(error_grid), axis=[3, 4])
    min_index = K.argmin(MSE_eq, axis=2)

    # Convert the indices tensor to make it usable for later tf.gather_nd operations
    indexGrid = tf.reshape(
        min_index, (tf.shape(min_index)[0] * tf.shape(min_index)[1], 1, 1, 1))

    # Find the matching true output for each sample in the batch
    selected_trueNash = tf.squeeze(tf.gather_nd(true_grid,
                                                indexGrid,
                                                batch_dims=2),
                                   axis=2)

    # Compute payoff of true equilibria (shape: [batch, max_eq, players])
    payoff_true = computePayoff_function['computePayoff_2dBatch'](
        game, selected_trueNash, pureStrategies_perPlayer, num_players)

    # Compute the difference in payoff for each player when their strategy in the true equilibrium is replaced by
    # the predicted strategy (shape: a list of length num_players, each element of shape [batch, max_eq])
    delta_per_player = []
    outperform_eq_per_player = []
    for player in range(num_players):
        # Replace the prediction for a player on the true equilibrium
        unstacked_list = tf.unstack(selected_trueNash, axis=2, num=num_players)
        unstacked_list[player] = nashEq_predicted[:, :, player]
        approx_on_true = tf.stack(unstacked_list, axis=2)

        # Compute payoff for the modified equilibria
        approx_payoff_current_player = computePayoff_function[
            'computePayoff_2dBatch'](game, approx_on_true,
                                     pureStrategies_perPlayer, num_players)

        # Compute delta and possible payoff improvement
        delta_per_player.append(
            tf.maximum(
                payoff_true[:, :, player] -
                approx_payoff_current_player[:, :, player], 0))
        # delta_per_player.append(tf.reduce_max(tf.maximum(payoff_true[:, :, player] - approx_payoff_current_player[:, :, player], 0), axis=1))
        outperform_eq_per_player.append(
            tf.reduce_all(tf.math.greater(
                approx_payoff_current_player[:, :, player],
                payoff_true[:, :, player]),
                          axis=1))

    # Find the maximum delta for all players
    delta_stacked = tf.stack(delta_per_player, axis=2)
    # delta_stacked = tf.stack(delta_per_player, axis=1)
    delta = tf.reduce_max(delta_stacked, axis=2)
    # delta = tf.reduce_max(delta_stacked)

    # Also count the samples where the predicted equilibria give every player a better payoff than the true
    # (classical-method) equilibria (a scalar)
    outperform_stacked = tf.stack(outperform_eq_per_player, axis=1)
    outperform_no = tf.math.count_nonzero(
        tf.reduce_all(outperform_stacked, axis=1))

    return delta, outperform_no
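The index-grid reshape feeds tf.gather_nd with batch_dims; a simpler self-contained illustration of per-row selection with batch_dims=2 (assumed toy shapes, not the exact reshape used above):

import tensorflow as tf

scores = tf.constant([[[0.4, 0.1], [0.9, 0.3]],    # (batch, rows, candidates)
                      [[0.2, 0.8], [0.5, 0.6]]])
best = tf.argmin(scores, axis=2)                   # (batch, rows)
picked = tf.gather_nd(scores, tf.expand_dims(best, -1), batch_dims=2)
print(picked.numpy())  # -> [[0.1 0.3] [0.2 0.5]]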
Example #11
 def call(self, inputs, **kwargs):
     return K.cast(K.argmin(inputs, axis=1), dtype=K.floatx())
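With assumed eager inputs, this call simply returns the per-row argmin as floats:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([[3., 1., 2.], [0.5, 2., 4.]])
print(K.cast(K.argmin(x, axis=1), dtype=K.floatx()).numpy())  # -> [1. 0.]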
Example #12
def hydra_oneSided_payoff_Eq_MSE(nashEq_proposer, nashEq_proposed, game,
                                 pureStrategies_perPlayer,
                                 computePayoff_function, num_players):
    """
    Function to compute the MSE of equilibria and of the payoffs from the matched equilibria, with a proposer and a
    proposed side defined (in Deferred Acceptance Algorithm terms).
    """

    # Create a row-wise meshgrid of proposed equilibria for each sample in the batch by adding a new dimension and
    # replicating the array along it
    proposed_grid = tf.tile(tf.expand_dims(nashEq_proposed, axis=2),
                            [1, 1, tf.shape(nashEq_proposed)[1], 1, 1])

    # Create a column-wise meshgrid of proposer equilibria for each sample in the batch by adding a new dimension and
    # replicating the array along it
    proposer_grid = tf.tile(tf.expand_dims(nashEq_proposer, axis=1),
                            [1, tf.shape(nashEq_proposer)[1], 1, 1, 1])

    # Compute a weight for the final result to compensate for the replacement of NaNs with zeros and its
    # effect on the averaging
    nan_count = tf.reduce_sum(
        tf.cast(tf.math.is_nan(nashEq_proposer[0][0] + nashEq_proposed[0][0]),
                tf.int32))
    eq_n_elements = tf.size(nashEq_proposer[0][0])
    compensation_factor = tf.cast(eq_n_elements / (eq_n_elements - nan_count),
                                  tf.float32)

    # Compute error grid
    error_grid = proposed_grid - proposer_grid

    # Replace nan values with 0
    error_grid = tf.where(tf.math.is_nan(error_grid),
                          tf.zeros_like(error_grid), error_grid)

    # Computing the index of the minimum mean squared error (MSE) over the candidate Nash equilibria
    MSE_eq = K.mean(K.square(error_grid), axis=[3, 4])
    min_index = K.argmin(MSE_eq, axis=2)

    # Convert the indices tensor to make it usable for later tf.gather_nd operations
    indexGrid = tf.reshape(
        min_index, (tf.shape(min_index)[0] * tf.shape(min_index)[1], 1, 1, 1))

    # Find the minimum of the mean squared error (MSE) of Nash equilibria
    loss_Eq_MSE = K.max(tf.squeeze(tf.gather_nd(MSE_eq,
                                                indexGrid,
                                                batch_dims=2),
                                   axis=[2]),
                        axis=1)
    loss_Eq_MSE *= compensation_factor

    # Computing the payoffs given the selected output for each sample in the batch
    selected_proposerNash = tf.squeeze(tf.gather_nd(proposer_grid,
                                                    indexGrid,
                                                    batch_dims=2),
                                       axis=2)
    payoff_proposer = computePayoff_function['computePayoff_2dBatch'](
        game, selected_proposerNash, pureStrategies_perPlayer, num_players)
    payoff_proposed = computePayoff_function['computePayoff_2dBatch'](
        game, nashEq_proposed, pureStrategies_perPlayer, num_players)

    # Computing the mean squared error (MSE) of payoffs
    loss_payoff_MSE = K.max(K.mean(K.square(payoff_proposed - payoff_proposer),
                                   axis=2),
                            axis=1)

    return loss_Eq_MSE, loss_payoff_MSE