Example #1
    def __init__(self, tensor):
        self.tensor = tensor

    def __call__(self, input):
        """ Replaces the single input of symbolic variable to be the passed argument.

        Parameters
        ----------
        input : TensorVariable
        """
        oldinput, = inputvars(self.tensor)
        return theano.clone(self.tensor, {oldinput: input}, strict=False)


scalar_identity = IdentityOp(scalar.upgrade_to_float, name='scalar_identity')
identity = tt.Elemwise(scalar_identity, name='identity')
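A short usage sketch for the pattern above (assuming theano and theano.tensor as tt are imported, as in the snippet): tt.Elemwise lifts the scalar op to an elementwise tensor op.

x = tt.vector('x')
y = identity(x)              # elementwise no-op built from scalar_identity
f = theano.function([x], y)
print(f([1., 2., 3.]))       # -> [ 1.  2.  3.]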


class GeneratorOp(Op):
    """
    Generator Op is designed for storing python generators
    inside a theano graph. The main limitation is the generator itself.

    There are some important cases when a generator becomes exhausted:
        - a finite (not endless) generator is passed
        - an exception is raised while `generator.__next__` is executed
            Note: this is dangerous with plain python generators, but fine with
            custom class-based generators that keep explicit state
    In addition, the data type yielded on each iteration must stay the same.
    """
    __props__ = ('generator', )
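A minimal sketch of the class-based pattern the docstring recommends (DataSampler is a hypothetical name, not part of the snippet): keeping explicit state makes the iterator resumable after an exception, unlike a plain generator function.

import numpy as np

class DataSampler:
    """Iterator with explicit state: safe to resume after an exception."""
    def __init__(self, data, batch_size=10):
        self.data = data
        self.batch_size = batch_size
        self.pos = 0  # explicit cursor instead of hidden generator frame state

    def __iter__(self):
        return self

    def __next__(self):
        if self.pos >= len(self.data):
            raise StopIteration
        batch = self.data[self.pos:self.pos + self.batch_size]
        self.pos += self.batch_size
        return batch

gen = DataSampler(np.arange(100, dtype='float32'))
first_batch = next(gen)  # consuming a batch advances self.pos to 10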
Example #2
        return [x_grad * self.grad_op(x)]


class I1e(UnaryScalarOp):
    """
    Modified Bessel function of the first kind of order 1, exponentially scaled.
    """

    nfunc_spec = ("scipy.special.i1e", 1, 1)

    def impl(self, x):
        return scipy.special.i1e(x)


i1e_scalar = I1e(upgrade_to_float_no_complex, name="i1e")
i1e = tt.Elemwise(i1e_scalar, name="Elemwise{i1e,no_inplace}")


class I0e(UnaryScalarOp):
    """
    Modified Bessel function of the first kind of order 0, exponentially scaled.
    """

    nfunc_spec = ("scipy.special.i0e", 1, 1)

    def impl(self, x):
        return scipy.special.i0e(x)

    def grad(self, inp, grads):
        (x, ) = inp
        (gz, ) = grads
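        # Sketch of how the truncated grad might continue (not the project's
        # verified code): since i0e(x) = exp(-|x|) * I0(x), its derivative is
        # i1e(x) - sgn(x) * i0e(x) for x != 0.
        return [gz * (i1e_scalar(x) - theano.scalar.sgn(x) * i0e_scalar(x))]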
Example #3
    Regression test for unimplemented gradient in an Elemwise Op.
    """
    class TestOp(scalar.ScalarOp):
        def __init__(self):
            self.output_types_preference = scalar.upgrade_to_float

        def impl(self, n, x):
            return x * n

        def grad(self, inputs, grads):
            # tuple parameter unpacking was removed in Python 3
            (n, x) = inputs
            (gz, ) = grads
            dy_dx = n
            return [
                theano.gradient.grad_not_implemented(self, 0, n), gz * dy_dx
            ]

    test_op = tensor.Elemwise(TestOp())
    x = tensor.scalar()
    # The call to `grad` used to crash.
    tensor.grad(test_op(2, x), x)
    # Verify that trying to use the not implemented gradient fails.
    try:
        tensor.grad(test_op(x, 2), x)
        assert False
    except theano.gradient.NullTypeGradError:
        pass


if __name__ == '__main__':

    t = TestElemwise('setUp')
    t.setUp()
Example #4
def test_local_gpu_elemwise():
    """
    Test local_gpu_elemwise when there is a dtype upcastable to float32
    """
    a = tensor.bmatrix()
    b = tensor.fmatrix()
    c = tensor.fmatrix()

    a_v = (numpy.random.rand(4, 5) * 10).astype("int8")
    b_v = (numpy.random.rand(4, 5) * 10).astype("float32")
    c_v = (numpy.random.rand(4, 5) * 10).astype("float32")

    # Due to optimization order, this composite is created when all
    # the ops are on the GPU.
    f = theano.function([a, b, c], a + b + c, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1
    assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0
    utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)

    # Now test with the composite already on the cpu before we move it
    # to the gpu
    a_s = theano.scalar.int8()
    b_s = theano.scalar.float32()
    c_s = theano.scalar.float32()
    out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s + c_s])
    out_op = tensor.Elemwise(out_s)
    f = theano.function([a, b, c], out_op(a, b, c), mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1
    assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0
    utt.assert_allclose(f(a_v, b_v, c_v), a_v + b_v + c_v)

    return  # Not yet implemented
    # Test multiple output
    a_s = theano.scalar.float32()
    a = tensor.fmatrix()
    from theano.scalar.basic import identity
    out_s = theano.scalar.Composite(
        [a_s, b_s, c_s],
        [identity(a_s), identity(c_s),
         identity(b_s)])
    outs_op = tensor.Elemwise(out_s)
    f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1
    assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0
    out = f(a_v, b_v, c_v)
    utt.assert_allclose(out[0], a_v)
    utt.assert_allclose(out[1], c_v)
    utt.assert_allclose(out[2], b_v)

    # Test multiple output
    # The second output uses c_s so that the checks below (a_v * c_v) match.
    out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s, a_s * c_s])
    outs_op = tensor.Elemwise(out_s)
    f = theano.function([a, b, c], outs_op(a, b, c), mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, GpuElemwise) for node in topo) == 1
    assert sum(type(node.op) == tensor.Elemwise for node in topo) == 0
    out = f(a_v, b_v, c_v)
    utt.assert_allclose(out[0], a_v + b_v)
    utt.assert_allclose(out[1], a_v * c_v)

    # Test non-contiguous input
    c = gpuarray_shared_constructor(numpy.asarray(c_v, dtype='float32'))
    f = theano.function([a, b],
                        outs_op(a[::2], b[::2], c[::2]),
                        mode=mode_with_gpu)
    out = f(a_v, b_v)
    utt.assert_allclose(out[0], a_v[::2] + b_v[::2])
    utt.assert_allclose(out[1], a_v[::2] * c_v[::2])
Example #5
    def __init__(self, tensor):
        self.tensor = tensor

    def __call__(self, input):
        """Replaces the single input of symbolic variable to be the passed argument.

        Parameters
        ----------
        input: TensorVariable
        """
        (oldinput, ) = inputvars(self.tensor)
        return theano.clone(self.tensor, {oldinput: input}, strict=False)


scalar_identity = IdentityOp(scalar.upgrade_to_float, name="scalar_identity")
identity = tt.Elemwise(scalar_identity, name="identity")


class GeneratorOp(Op):
    """
    Generator Op is designed for storing python generators inside a theano graph.

    __call__ creates a TensorVariable
        It has 2 new methods
        - var.set_gen(gen): sets a new generator
        - var.set_default(value): sets a new default value (None erases the default)

    If the generator is exhausted, the variable produces the default value if it
    is not None, otherwise it raises a `StopIteration` exception that can be
    caught at runtime.

    Parameters
Example #6
        x, = inp
        z, = out
        if node.inputs[0].type in [scalar.float32, scalar.float64]:
            return """%(z)s =
                lgamma(%(x)s);""" % locals()
        raise NotImplementedError('only floating point is implemented')

    def __eq__(self, other):
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))


scalar_gammaln = GammaLn(scalar.upgrade_to_float, name='scalar_gammaln')
gammaln = tensor.Elemwise(scalar_gammaln, name='gammaln')


class Psi(scalar.UnaryScalarOp):
    """
    Compute derivative of gammaln(x)
    """
    @staticmethod
    def st_impl(x):
        return special.psi(x)

    def impl(self, x):
        return Psi.st_impl(x)

    # def grad(): no gradient implemented yet
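The docstring's claim that psi is the derivative of gammaln is easy to check numerically with scipy alone (a sketch, independent of the theano wrappers above):

import numpy as np
from scipy import special

x, eps = 3.7, 1e-6
# central finite difference of gammaln should match psi
finite_diff = (special.gammaln(x + eps) - special.gammaln(x - eps)) / (2 * eps)
np.testing.assert_allclose(finite_diff, special.psi(x), rtol=1e-5)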
Example #7
File: special.py  Project: DoosanJung/pymc3
import numpy as np
import theano.tensor as tt
from theano.scalar.basic_scipy import GammaLn, Psi, I0, I1
from theano import scalar

__all__ = ['gammaln', 'multigammaln', 'psi', 'i0', 'i1']

scalar_gammaln = GammaLn(scalar.upgrade_to_float, name='scalar_gammaln')
gammaln = tt.Elemwise(scalar_gammaln, name='gammaln')


def multigammaln(a, p):
    """Multivariate Log Gamma

    :Parameters:
        a : tensor like
        p : int degrees of freedom
            p > 0
    """
    i = tt.arange(1, p + 1)
    return (p * (p - 1) * tt.log(np.pi) / 4.
            + tt.sum(gammaln(a + (1. - i) / 2.), axis=0))

scalar_psi = Psi(scalar.upgrade_to_float, name='scalar_psi')
psi = tt.Elemwise(scalar_psi, name='psi')

scalar_i0 = I0(scalar.upgrade_to_float, name='scalar_i0')
i0 = tt.Elemwise(scalar_i0, name='i0')

scalar_i1 = I1(scalar.upgrade_to_float, name='scalar_i1')
i1 = tt.Elemwise(scalar_i1, name='i1')
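The multivariate log-gamma above can be sanity checked against scipy.special.multigammaln (a minimal sketch, assuming theano is installed and the definitions above are in scope):

import numpy as np
import scipy.special
import theano

a = tt.dscalar('a')
p = 3
f = theano.function([a], multigammaln(a, p))
# should agree with scipy's reference implementation
np.testing.assert_allclose(f(2.5), scipy.special.multigammaln(2.5, p))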
Example #8
File: special.py  Project: nargess62/pymc-1
        x, = inp
        z, = out
        if node.inputs[0].type in [scalar.float32, scalar.float64]:
            return """%(z)s =
                lgamma(%(x)s);""" % locals()
        raise NotImplementedError('only floating point is implemented')

    def __eq__(self, other):
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))


scalar_gammaln = GammaLn(scalar.upgrade_to_float, name='scalar_gammaln')
gammaln = tensor.Elemwise(scalar_gammaln, name='gammaln')


def multigammaln(a, p):
    """Multivariate Log Gamma

    :Parameters:
        a : tensor like
        p : int degrees of freedom
            p > 0
    """
    a = t.shape_padright(a)
    i = arange(p)

    # Gamma_p(a) = pi^(p(p-1)/4) * prod_{j=0}^{p-1} Gamma(a - j/2),
    # consistent with the (1 - i) / 2 form used in the other examples
    return p * (p - 1) * log(pi) / 4. + t.sum(gammaln(a - i / 2.),
                                              axis=a.ndim - 1)
Example #9
File: special.py  Project: vitasiku/pymc3
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import numpy as np
import theano.tensor as tt
from theano.scalar.basic_scipy import GammaLn, Psi
from theano import scalar

__all__ = ['gammaln', 'multigammaln', 'psi', 'log_i0']

scalar_gammaln = GammaLn(scalar.upgrade_to_float, name='scalar_gammaln')
gammaln = tt.Elemwise(scalar_gammaln, name='gammaln')


def multigammaln(a, p):
    """Multivariate Log Gamma

    Parameters
    ----------
    a : tensor like
    p : int
       degrees of freedom. p > 0
    """
    i = tt.arange(1, p + 1)
    return (p * (p - 1) * tt.log(np.pi) / 4. +
            tt.sum(gammaln(a + (1. - i) / 2.), axis=0))
Example #10
  return 1 - _cfrac(n, x) * exp(n * log(x) - x - logGamma(n));
}  /* GammaP() */


double Chi2SF (double k, double x)
{
    return 1 - GammaP(k/2., x/2.);
}
        """)
    
    
    def c_code(self, node, name, inp, out, sub):
        x, k = inp
        z, = out
        if node.inputs[0].type in [scalar.float32, scalar.float64]:
            return """%(z)s =
                Chi2SF(%(k)s, %(x)s);""" % locals()
        raise NotImplementedError('only floating point is implemented')

    def __eq__(self, other):
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))

    
scalar_chi2pv = Chi2PVop(scalar.upgrade_to_float)
chi2pv = tensor.Elemwise(scalar_chi2pv)
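For a quick sanity check of the C-level Chi2SF: the chi-squared survival function equals the regularized upper incomplete gamma Q(k/2, x/2), which scipy exposes directly (a sketch; scipy assumed available):

import numpy as np
import scipy.special
import scipy.stats

def chi2_sf_reference(x, k):
    # chi-squared survival function: Q(k/2, x/2)
    return scipy.special.gammaincc(k / 2., x / 2.)

np.testing.assert_allclose(chi2_sf_reference(4.2, 3.0),
                           scipy.stats.chi2.sf(4.2, 3.0))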


Example #11
File: special.py  Project: xiaoxi0920/pymc3
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import numpy as np
import theano.tensor as tt
from theano.scalar.basic_scipy import GammaLn, Psi
from theano import scalar

__all__ = ["gammaln", "multigammaln", "psi", "log_i0"]

scalar_gammaln = GammaLn(scalar.upgrade_to_float, name="scalar_gammaln")
gammaln = tt.Elemwise(scalar_gammaln, name="gammaln")


def multigammaln(a, p):
    """Multivariate Log Gamma

    Parameters
    ----------
    a: tensor like
    p: int
       degrees of freedom. p > 0
    """
    i = tt.arange(1, p + 1)
    return p * (p - 1) * tt.log(np.pi) / 4.0 + tt.sum(
        gammaln(a + (1.0 - i) / 2.0), axis=0)