Example #1
import numpy as np
import tensorflow as tf


def LogME(f: tf.Tensor, y: tf.Tensor, regression=False):
    # Convert the feature matrix (N x D) and the targets to float64 numpy arrays.
    f = f.numpy().astype(np.float64)
    y = y.numpy()
    if regression:
        y = y.astype(np.float64)

    # Work with f of shape (D, N); fh keeps the original (N, D) layout.
    fh = f
    f = f.transpose()
    D, N = f.shape
    # SVD of the D x D matrix f @ fh, reused for every target below.
    v, s, vh = np.linalg.svd(f @ fh, full_matrices=True)

    evidences = []
    if regression:
        # One log-evidence value per regression target dimension.
        K = y.shape[1]
        for i in range(K):
            y_ = y[:, i]
            evidence = each_evidence(y_, f, fh, v, s, vh, N, D)
            evidences.append(evidence)
    else:
        # One-vs-rest binary targets: one log-evidence value per class.
        K = int(y.max() + 1)
        for i in range(K):
            y_ = (y == i).astype(np.float64)
            evidence = each_evidence(y_, f, fh, v, s, vh, N, D)
            evidences.append(evidence)
    return np.mean(evidences)
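
The helper each_evidence is not shown in this snippet. For orientation, here is a minimal sketch, assuming it performs the fixed-point maximization of the log evidence over alpha and beta described in the LogME paper; the actual helper that this example calls may differ in its details.

import numpy as np


def each_evidence(y_, f, fh, v, s, vh, N, D):
    # Sketch of a per-target log-evidence computation (an assumption, not the
    # original helper): alternate closed-form updates of alpha and beta until
    # their ratio converges, then evaluate the log marginal likelihood.
    epsilon = 1e-5
    alpha, beta = 1.0, 1.0
    lam = alpha / beta
    tmp = vh @ (f @ np.ascontiguousarray(y_))
    for _ in range(11):
        gamma = (s / (s + lam)).sum()
        m = v @ (tmp * beta / (alpha + beta * s))      # posterior mean of the weights
        alpha_de = (m * m).sum()
        alpha = gamma / (alpha_de + epsilon)
        beta_de = ((y_ - fh @ m) ** 2).sum()           # residual sum of squares
        beta = (N - gamma) / (beta_de + epsilon)
        new_lam = alpha / beta
        if np.abs(new_lam - lam) / lam < 0.01:
            break
        lam = new_lam
    evidence = (D / 2.0 * np.log(alpha)
                + N / 2.0 * np.log(beta)
                - 0.5 * np.sum(np.log(alpha + beta * s))
                - beta / 2.0 * beta_de
                - alpha / 2.0 * alpha_de
                - N / 2.0 * np.log(2 * np.pi))
    return evidence / N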
Example #2
    def jsd(p: tf.Tensor, q: tf.Tensor, base=np.e):
        """
            Pairwise Jensen-Shannon divergence, following
            https://en.wikipedia.org/wiki/Jensen%E2%80%93Shannon_divergence

            Returns NaN for rows whose probabilities are all zero (unlabeled).
        """
        import scipy.stats

        p, q = p.numpy(), q.numpy()
        # normalize p, q to probability distributions along the last axis
        p, q = p / p.sum(axis=-1, keepdims=True), q / q.sum(axis=-1,
                                                            keepdims=True)
        # scipy.stats.entropy reduces along axis 0, so move the class axis first
        p, q = p.transpose(), q.transpose()
        m = 1.0 / 2 * (p + q)
        # JSD(p, q) = KL(p || m) / 2 + KL(q || m) / 2
        jsd = (scipy.stats.entropy(p, m, base=base) / 2.0 +
               scipy.stats.entropy(q, m, base=base) / 2.0)
        # guard against numerical overshoot and restore the batch axis
        jsd = np.clip(jsd, 0.0, 1.0).transpose()
        return jsd
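
A minimal usage sketch follows, assuming jsd is reachable as a plain function (for example as a @staticmethod on its enclosing class, which is not shown here); the tensors p and q are hypothetical batches of unnormalized class scores.

import tensorflow as tf

# Hypothetical inputs: two batches of unnormalized class scores.
# The second row of p is all zeros ("unlabeled"), so its divergence is NaN.
p = tf.constant([[0.7, 0.2, 0.1],
                 [0.0, 0.0, 0.0]], dtype=tf.float64)
q = tf.constant([[0.6, 0.3, 0.1],
                 [0.3, 0.3, 0.4]], dtype=tf.float64)

per_row_jsd = jsd(p, q, base=2)  # one value per row, bounded by 1.0 for base 2
print(per_row_jsd)               # small positive value for the first row, nan for the second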