def entropy_binary_vector(P):
    """
        if P[i,j] represents the probability
            of some binary random variable X[i,j] being 1
        then rval[i] gives the entropy of the random vector
        X[i,:]
    """

    oneMinusP = 1. - P

    PlogP = xlogx(P)
    omPlogOmP = xlogx(oneMinusP)

    term1 = -T.sum(PlogP, axis=1)
    assert len(term1.type.broadcastable) == 1

    term2 = -T.sum(omPlogOmP, axis=1)
    assert len(term2.type.broadcastable) == 1

    rval = term1 + term2

    for plp, olo, t1, t2, rv in get_debug_values(PlogP, omPlogOmP, term1, term2, rval):
        debug_assert(not np.any(np.isnan(plp)))
        debug_assert(not np.any(np.isinf(plp)))
        debug_assert(not np.any(np.isnan(olo)))
        debug_assert(not np.any(np.isinf(olo)))

        debug_assert(not np.any(np.isnan(t1)))
        debug_assert(not np.any(np.isnan(t2)))
        debug_assert(not np.any(np.isnan(rv)))

    return rval
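The function above sums, over each row of P, the elementwise binary entropies -p*ln(p) - (1-p)*ln(1-p) (natural log). A minimal usage sketch, assuming Theano is installed and entropy_binary_vector plus its dependencies (xlogx, T, np, the debug helpers) are already in scope as in the module above:

import numpy as np
import theano
import theano.tensor as T

P = T.dmatrix('P')             # each entry is a Bernoulli probability in [0, 1]
H = entropy_binary_vector(P)   # one entropy value per row, in nats
f = theano.function([P], H)

probs = np.array([[0.5, 0.5],
                  [0.9, 0.1]])
print(f(probs))
# row 0: 2 * ln(2)                             ~= 1.386
# row 1: 2 * -(0.9 * ln(0.9) + 0.1 * ln(0.1))  ~= 0.650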
Example #2
    def test_basic(self):
        x = as_tensor_variable([1, 0])
        y = xlogx(x)
        f = theano.function([], [y])
        assert numpy.all(f() == numpy.asarray([0, 0.0]))

        # class Dummy(object):
        #     def make_node(self, a):
        #         return [xlogx(a)[:,2]]
        utt.verify_grad(xlogx, [numpy.random.rand(3, 4)])
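The test above relies on the convention that both 1*log(1) and 0*log(0) evaluate to 0. For readers who want a non-symbolic reference, here is a plain NumPy sketch of the same convention (the helper name xlogx_np is made up here; it is not part of Theano):

import numpy as np

def xlogx_np(x):
    # x * log(x), with the limit 0 * log(0) taken to be 0
    x = np.asarray(x, dtype=float)
    out = np.zeros_like(x)
    nz = x > 0
    out[nz] = x[nz] * np.log(x[nz])
    return out

assert np.allclose(xlogx_np([1.0, 0.0]), [0.0, 0.0])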
Example #3
def entropy_binary_vector(P):
    """
    .. todo::

        WRITEME properly

    If P[i,j] represents the probability of some binary random variable X[i,j]
    being 1, then rval[i] gives the entropy of the random vector X[i,:]
    """

    for Pv in get_debug_values(P):
        assert Pv.min() >= 0.0
        assert Pv.max() <= 1.0

    oneMinusP = 1. - P

    PlogP = xlogx(P)
    omPlogOmP = xlogx(oneMinusP)

    term1 = -T.sum(PlogP, axis=1)
    assert len(term1.type.broadcastable) == 1

    term2 = -T.sum(omPlogOmP, axis=1)
    assert len(term2.type.broadcastable) == 1

    rval = term1 + term2

    for plp, olo, t1, t2, rv in get_debug_values(PlogP, omPlogOmP, term1,
                                                 term2, rval):
        debug_assert(not np.any(np.isnan(plp)))
        debug_assert(not np.any(np.isinf(plp)))
        debug_assert(not np.any(np.isnan(olo)))
        debug_assert(not np.any(np.isinf(olo)))

        debug_assert(not np.any(np.isnan(t1)))
        debug_assert(not np.any(np.isnan(t2)))
        debug_assert(not np.any(np.isnan(rv)))

    return rval
Example #4
def entropy_binary_vector(P):
    """
    .. todo::

        WRITEME properly

    If P[i,j] represents the probability of some binary random variable X[i,j]
    being 1, then rval[i] gives the entropy of the random vector X[i,:]
    """

    for Pv in get_debug_values(P):
        assert Pv.min() >= 0.0
        assert Pv.max() <= 1.0

    oneMinusP = 1. - P

    PlogP = xlogx(P)
    omPlogOmP = xlogx(oneMinusP)

    term1 = - T.sum(PlogP, axis=1)
    assert len(term1.type.broadcastable) == 1

    term2 = - T.sum(omPlogOmP, axis=1)
    assert len(term2.type.broadcastable) == 1

    rval = term1 + term2

    debug_vals = get_debug_values(PlogP, omPlogOmP, term1, term2, rval)
    for plp, olo, t1, t2, rv in debug_vals:
        debug_assert(isfinite(plp))
        debug_assert(isfinite(olo))

        debug_assert(not contains_nan(t1))
        debug_assert(not contains_nan(t2))
        debug_assert(not contains_nan(rv))

    return rval
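This last variant makes the intended checks explicit (isfinite on the xlogx terms, contains_nan on the sums). For cross-checking the symbolic graph, a plain NumPy version of the same per-row formula can be handy; the sketch below (entropy_binary_vector_np is a made-up name, not part of pylearn2) assumes only NumPy:

import numpy as np

def entropy_binary_vector_np(P):
    # rval[i] = sum_j -(P[i,j]*ln P[i,j] + (1-P[i,j])*ln(1-P[i,j])), in nats
    P = np.asarray(P, dtype=float)
    with np.errstate(divide='ignore', invalid='ignore'):
        plogp = np.where(P > 0, P * np.log(P), 0.0)
        omP = 1.0 - P
        omplogomp = np.where(omP > 0, omP * np.log(omP), 0.0)
    return -(plogp + omplogomp).sum(axis=1)

# e.g. entropy_binary_vector_np([[0.5, 0.5]]) -> [2 * ln(2)] ~= [1.386]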
Example #5
    def test0(self):
        x = as_tensor_variable([1, 0])
        y = xlogx(x)
        f = theano.function([], [y])
        self.assertTrue(numpy.all(f() == numpy.asarray([0, 0.])))