    def max(self, x):
        pred = self._predict(x)
        xp = cuda.get_array_module(pred)

        # Randomly permute the items so that ties are broken at random
        idx = as_variable(xp.random.uniform(0.0, 1.0, pred.shape).argsort())
        pred = select_items_per_row(pred, idx)

        # Sort the permuted predictions in descending order
        action = as_variable(xp.fliplr(xp.argsort(pred.data, axis=1)))

        # Map the sorted positions back through the random permutation to
        # recover the original item indices
        action = select_items_per_row(idx, action)

        return self._cut(action)
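A self-contained NumPy sketch of the same tie-breaking trick, with hypothetical
example data and np.take_along_axis standing in for select_items_per_row:

import numpy as np

# Hypothetical scores with a tie between columns 1 and 2.
pred = np.array([[0.3, 0.5, 0.5, 0.1]])

# Draw a random permutation of the columns and apply it before sorting.
idx = np.random.uniform(0.0, 1.0, pred.shape).argsort(axis=1)
pred_perm = np.take_along_axis(pred, idx, axis=1)

# Sort the permuted scores in descending order, then map the sorted
# positions back through the permutation to the original column indices.
order = np.fliplr(np.argsort(pred_perm, axis=1))
action = np.take_along_axis(idx, order, axis=1)

# 'action' lists the items by descending score; the tied items (columns 1
# and 2) come first, in an order that changes from run to run.
print(action)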
Example #2
    def forward(self, inputs):
        xp = cuda.get_array_module(*inputs)
        ranking, relevance_labels = inputs

        # Computing nDCG on an empty ranking should just return 0.0
        if ranking.shape[1] == 0:
            return xp.zeros(ranking.shape[0]),

        # Top-k cutoff
        last = ranking.shape[1]
        if self.k > 0:
            last = min(self.k, last)

        # Gather the relevance labels in the order given by the rankings
        relevance = select_items_per_row(as_variable(relevance_labels),
                                         as_variable(ranking))
        relevance = relevance[:, :last].data.astype(dtype=xp.float32)

        # Compute numerator of DCG formula
        if self.exp:
            numerator = (2.0 ** relevance) - 1.0
        else:
            numerator = relevance

        # Compute denominator of DCG formula
        arange = xp.broadcast_to(2.0 + xp.arange(relevance.shape[1]),
                                 relevance.shape)
        denominator = xp.log2(arange)

        if self.k >= 0:
            return xp.asarray(xp.sum(numerator / denominator, axis=1)),
        else:
            return xp.asarray(xp.cumsum(numerator / denominator, axis=1)),
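For reference, with exponential gains the quantity computed above is the
standard DCG at cutoff k,

    DCG@k = \sum_{i=1}^{k} \frac{2^{rel_i} - 1}{\log_2(i + 1)},

where the \log_2(i + 1) denominator (positions counted from 1) corresponds to
the 2.0 + xp.arange(...) term in the code (positions counted from 0). With
self.exp disabled the numerator is simply rel_i, and a negative self.k makes
the final branch return the cumulative DCG at every cutoff rather than a
single sum per row.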
Example #3
def listpl(x, t, nr_docs, α=10.0):
    """
    The ListPL loss, a stochastic variant of ListMLE that in expectation
    approximates the true ListNet loss.

    :param x: The activation of the previous layer
    :type x: chainer.Variable

    :param t: The target labels
    :type t: chainer.Variable

    :param nr_docs: The number of documents per query
    :type nr_docs: chainer.Variable

    :param α: The temperature parameter of the Plackett-Luce distribution
    :type α: float

    :return: The loss
    :rtype: chainer.Variable
    """
    t, nr_docs = as_variable(t), as_variable(nr_docs)
    t = as_variable(t.data.astype(x.dtype))
    t = cf.log_softmax(t * α)
    indices = sample_without_replacement(t)

    x_hat = select_items_per_row(x, indices)

    # Compute MLE loss
    per_sample_loss = -cf.sum(x_hat - logcumsumexp(x_hat), axis=1)
    return cf.mean(per_sample_loss)
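The sampling step draws a ranking \pi from a Plackett-Luce distribution over
the temperature-scaled relevance labels,

    P(\pi \mid t) = \prod_{i=1}^{n} \frac{\exp(\alpha t_{\pi(i)})}{\sum_{j=i}^{n} \exp(\alpha t_{\pi(j)})},

and the returned value is the ListMLE negative log-likelihood of that sampled
ranking under the model scores x (assuming logcumsumexp accumulates the
log-sum-exp from each position to the end of the row, as the ListMLE
normalizer requires).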
Example #4
def listmle(x, t, nr_docs):
    """
    The ListMLE loss as in Xia et al. (2008), Listwise Approach to Learning to
    Rank - Theory and Algorithm.

    :param x: The activation of the previous layer
    :type x: chainer.Variable

    :param t: The target labels
    :type t: chainer.Variable

    :param nr_docs: The number of documents per query
    :type nr_docs: chainer.Variable

    :return: The loss
    :rtype: chainer.Variable
    """
    t, nr_docs = as_variable(t), as_variable(nr_docs)

    # Gather the activations in decreasing order of the relevance labels
    # (the ground-truth ranking)
    indices = argsort(t, axis=1)
    x_hat = select_items_per_row(x, cf.flip(indices, axis=1))

    # Compute MLE loss
    per_sample_loss = -cf.sum(x_hat - logcumsumexp(x_hat), axis=1)
    return cf.mean(per_sample_loss)
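Written out, with \pi the permutation that sorts the documents by decreasing
relevance label, the per-query loss computed above is the negative
Plackett-Luce log-likelihood

    L(x, \pi) = -\sum_{i=1}^{n} \Bigl[ x_{\pi(i)} - \log \sum_{j=i}^{n} \exp(x_{\pi(j)}) \Bigr],

again under the assumption that logcumsumexp yields the
\log \sum_{j \ge i} \exp(x_{\pi(j)}) normalizer at each position i.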
Example #5
def test_select_items_identity():
    idx = as_variable(np.array([[0, 1, 2, 3], [0, 1, 2, 3]]))
    val = as_variable(np.array([[0.5, 3.14, 0.0, -9.9], [1.0, -1.0, 1.0,
                                                         4.0]]))

    out = select_items_per_row(val, idx)

    assert_allclose(out.data, val.data)
Example #6
def test_select_items_none():
    idx = as_variable(np.array([[], []], dtype=np.int32))
    val = as_variable(np.array([[0.5, 3.14, 0.0, -9.9], [1.0, -1.0, 1.0,
                                                         4.0]]))

    out = select_items_per_row(val, idx)

    assert_allclose(out.data, np.array([[], []], dtype=np.int32))
Example #7
def test_select_items_less_idx():
    idx = as_variable(np.array([[3, 1], [1, 3]]))
    val = as_variable(np.array([[0.5, 3.14, 0.0, -9.9], [1.0, -1.0, 1.0,
                                                         4.0]]))
    exp = as_variable(np.array([[-9.9, 3.14], [-1.0, 4.0]]))

    out = select_items_per_row(val, idx)

    assert_allclose(out.data, exp.data)
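Taken together, these tests suggest that select_items_per_row is a per-row
gather with out[r, c] == val[r, idx[r, c]]. A minimal NumPy sketch with that
behaviour (not the library implementation) would be:

import numpy as np

def select_items_per_row_np(val, idx):
    # Per-row gather: out[r, c] = val[r, idx[r, c]].
    return np.take_along_axis(val, idx, axis=1)

val = np.array([[0.5, 3.14, 0.0, -9.9], [1.0, -1.0, 1.0, 4.0]])
idx = np.array([[3, 1], [1, 3]])
print(select_items_per_row_np(val, idx))  # expected: [[-9.9, 3.14], [-1.0, 4.0]]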
Example #8
    def log_propensity_independent(self, x, action):
        xp = cuda.get_array_module(action)
        pred = self._predict(x)

        final_action = action
        if self.k > 0 and action.shape[1] < pred.shape[1]:
            all_actions = F.broadcast_to(xp.arange(0, pred.shape[1],
                                                   dtype=action.data.dtype),
                                         pred.shape)
            inv_items = inverse_select_items_per_row(all_actions, action)
            items = select_items_per_row(all_actions, action)
            final_action = F.concat((items, inv_items), axis=1)

        pred = select_items_per_row(pred, final_action)

        results = F.log_softmax(pred)
        if self.k > 0:
            results = results[:, :self.k]
        return results
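Because log_softmax normalizes each row over all items before the result is
cut back to the first k columns, every one of the top-k positions contributes
a full-softmax log-probability. Under the independence assumption in the
method name, the returned matrix therefore holds, for position i of action a,

    \log p(a_i \mid x) = x_{a_i} - \log \sum_{j=1}^{n} \exp(x_j), \qquad i = 1, \dots, k.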
Example #9
def unpad(permutation, nr_docs):
    """
    Unpads a permutation. For each row in the permutation mini-batch, nr_docs
    indicates how many documents the original entry contained. Unpadding moves
    the padded documents to the end of the permutation while retaining the
    relative order of the real documents.

    :param permutation: The permutation to re-arrange
    :type permutation: chainer.Variable

    :param nr_docs: The number of documents
    :type nr_docs: chainer.Variable

    :return: An unpadded version of the permutation
    :rtype: chainer.Variable
    """
    xp = cuda.get_array_module(permutation, nr_docs)

    permutation_d, nr_docs_d = permutation.data, nr_docs.data

    arange = xp.broadcast_to(xp.arange(permutation.shape[1]),
                             permutation.shape)
    arange_1 = xp.copy(arange)
    arange_2 = xp.copy(arange)

    arange_1[permutation_d >= nr_docs_d[:, None]] = permutation.shape[1] + 1
    arange_2[permutation_d < nr_docs_d[:, None]] = -1

    arange_1_s = xp.sort(arange_1)
    arange_2_s = xp.sort(arange_2)

    arange_1_s[arange_1_s == permutation.shape[1] + 1] = 0
    arange_2_s[arange_2_s == -1] = 0

    indices = arange_1_s + arange_2_s

    return select_items_per_row(permutation, as_variable(indices))
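A self-contained NumPy sketch of the same stable-partition trick, on
hypothetical data with one padded permutation and three real documents: the
padded indices (3 and 4) move to the back while the relative order of the
real documents (2, 0, 1) is preserved.

import numpy as np

permutation = np.array([[2, 4, 0, 3, 1]])
nr_docs = np.array([3])
n = permutation.shape[1]
arange = np.broadcast_to(np.arange(n), permutation.shape)

# Real documents keep their position index and padded ones get a sentinel
# (and vice versa), so sorting partitions both groups stably.
keep_s = np.sort(np.where(permutation < nr_docs[:, None], arange, n + 1), axis=1)
move_s = np.sort(np.where(permutation >= nr_docs[:, None], arange, -1), axis=1)
keep_s[keep_s == n + 1] = 0
move_s[move_s == -1] = 0

indices = keep_s + move_s
print(np.take_along_axis(permutation, indices, axis=1))  # [[2 0 1 4 3]]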