Example #1
 def test_hash_and_eq_params(self):
     wp1 = ParamsType(a=Generic(),
                      array=TensorType('int64', (False, )),
                      floatting=Scalar('float64'),
                      npy_scalar=TensorType('float64', tuple()))
     wp2 = ParamsType(a=Generic(),
                      array=TensorType('int64', (False, )),
                      floatting=Scalar('float64'),
                      npy_scalar=TensorType('float64', tuple()))
     w1 = Params(wp1,
                 a=1,
                 array=np.asarray([1, 2, 4, 5, 7]),
                 floatting=-4.5,
                 npy_scalar=np.asarray(12))
     w2 = Params(wp2,
                 a=1,
                 array=np.asarray([1, 2, 4, 5, 7]),
                 floatting=-4.5,
                 npy_scalar=np.asarray(12))
     assert w1 == w2
     assert not (w1 != w2)
     assert hash(w1) == hash(w2)
     # Changing attributes names only (a -> other_name).
     wp2_other = ParamsType(other_name=Generic(),
                            array=TensorType('int64', (False, )),
                            floatting=Scalar('float64'),
                            npy_scalar=TensorType('float64', tuple()))
     w2 = Params(wp2_other,
                 other_name=1,
                 array=np.asarray([1, 2, 4, 5, 7]),
                 floatting=-4.5,
                 npy_scalar=np.asarray(12))
     assert w1 != w2
     # Changing attributes values only (now a=2).
     w2 = Params(wp2,
                 a=2,
                 array=np.asarray([1, 2, 4, 5, 7]),
                 floatting=-4.5,
                 npy_scalar=np.asarray(12))
     assert w1 != w2
     # Changing NumPy array values (5 -> -5).
     w2 = Params(wp2,
                 a=1,
                 array=np.asarray([1, 2, 4, -5, 7]),
                 floatting=-4.5,
                 npy_scalar=np.asarray(12))
     assert w1 != w2
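The equality checks above treat a Params object as a typed record keyed by field name. As a point of reference, here is a minimal sketch (an assumption, based on how the QuadraticOpFunc example near the end of this page reads its params by attribute) of building a small ParamsType and reading the stored values back:

from theano import Generic
from theano.scalar import Scalar
from theano.gof import ParamsType, Params

wp = ParamsType(a=Generic(), floatting=Scalar('float64'))
p = Params(wp, a=1, floatting=-4.5)
# Assumption: stored fields are exposed as plain attributes on the Params object.
assert p.a == 1
assert p.floatting == -4.5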
Example #2
 def test_invalid_modes(self):
     # Modes 'r+', 'r', and 'w+' cannot work with Theano, because
     # the output array may be modified inplace, and that should not
     # modify the original file.
     path = Variable(Generic())
     for mmap_mode in ('r+', 'r', 'w+', 'toto'):
         self.assertRaises(ValueError, tensor.load, path, 'int32',
                           (False, ), mmap_mode)
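For contrast with the rejected modes, the later examples on this page only ever pass mmap_mode=None (the default) or 'c'. A minimal sketch restating just that accepted usage:

from theano import tensor, Generic
from theano.gof import Variable

path = Variable(Generic())
x_copy = tensor.load(path, 'int32', (False,))                # default mmap_mode=None: data copied into memory
x_cow = tensor.load(path, 'int32', (False,), mmap_mode='c')  # 'c': copy-on-write memmap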
Example #3
 def test0(self):
     path = Variable(Generic())
     # Not specifying mmap_mode defaults to None, and the data is
     # copied into main memory
     x = tensor.load(path, 'int32', (False, ))
     y = x * 2
     fn = function([path], y)
     assert (fn(self.filename) == (self.data * 2)).all()
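This test (and several others below) refers to self.filename and self.data without showing the fixture. Judging from the standalone variants in Example #4 and Example #6, which build the file inline with numpy.save, a hypothetical setUp could look like this (assuming the same os/numpy/theano imports as those examples):

def setUp(self):
    # Hypothetical fixture, reconstructed from the standalone variants below.
    self.data = numpy.arange(5, dtype=numpy.int32)
    self.filename = os.path.join(theano.config.base_compiledir, "_test.npy")
    numpy.save(self.filename, self.data)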
Example #4
 def test_memmap(self):
     data = numpy.arange(5, dtype=numpy.int32)
     filename = os.path.join(theano.config.base_compiledir, "_test.npy")
     numpy.save(filename, data)
     path = Variable(Generic())
     x = tensor.load(path, 'int32', (False,), mmap_mode='r+')
     fn = function([path], x)
     assert type(fn(filename)) == numpy.core.memmap
Example #5
 def test_invalid_modes(self):
     # Modes 'r+', 'r', and 'w+' cannot work with Theano, because
     # the output array may be modified inplace, and that should not
     # modify the original file.
     path = Variable(Generic())
     for mmap_mode in ("r+", "r", "w+", "toto"):
         with pytest.raises(ValueError):
             tensor.load(path, "int32", (False, ), mmap_mode)
Example #6
 def test0(self):
     data = numpy.arange(5, dtype=numpy.int32)
     filename = os.path.join(theano.config.base_compiledir, "_test.npy")
     numpy.save(filename, data)
     path = Variable(Generic())
     x = tensor.load(path, 'int32', (False,))
     y = x*2
     fn = function([path], y)
     assert (fn(filename) == data*2).all()
Example #7
    def test_params_type_filtering(self):
        shape_tensor5 = (1, 2, 2, 3, 2)
        size_tensor5 = (shape_tensor5[0] * shape_tensor5[1] * shape_tensor5[2]
                        * shape_tensor5[3] * shape_tensor5[4])
        random_tensor = np.random.normal(
            size=size_tensor5).reshape(shape_tensor5)

        w = ParamsType(a1=TensorType('int32', (False, False)),
                       a2=TensorType('float64',
                                     (False, False, False, False, False)),
                       a3=Generic())

        # With a value that does not match the params type.
        o = Params(w,
                   a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                       12]]).astype('int64'),
                   a2=random_tensor.astype('float32'),
                   a3=2000)
        # should fail (o.a1 is not int32, o.a2 is not float64)
        self.assertRaises(TypeError, w.filter, o, True)
        # should fail (o.a1 is not int32, o.a2 is not float64, and downcast is disallowed)
        self.assertRaises(TypeError, w.filter, o, False, False)
        # Should pass.
        w.filter(o, strict=False, allow_downcast=True)

        # With a value that matches the params type.
        o1 = Params(w,
                    a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                        12]]).astype('int32'),
                    a2=random_tensor.astype('float64'),
                    a3=2000)
        # All should pass.
        w.filter(o1, strict=True)
        w.filter(o1, strict=False, allow_downcast=False)
        w.filter(o1, strict=False, allow_downcast=True)

        # Check values_eq and values_eq_approx.
        o2 = Params(w,
                    a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                        12]]).astype('int32'),
                    a2=random_tensor.astype('float64'),
                    a3=2000)
        assert w.values_eq(o1, o2)
        assert w.values_eq_approx(o1, o2)

        # Check values_eq_approx.
        # NB: It is not obvious exactly which kinds of differences values_eq
        # rejects but values_eq_approx accepts, so we just perturb the float
        # values slightly.
        o3 = Params(w,
                    a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                        12]]).astype('int32'),
                    a2=(random_tensor.astype('float32') * 10 / 2.2 *
                        2.19999999999 / 10).astype('float64'),
                    a3=2000.0 - 0.00000000000000001)
        assert w.values_eq_approx(o1, o3)
Example #8
 def test1(self):
     path = Variable(Generic())
     # 'c' means "copy-on-write", which allows the array to be overwritten
     # by an inplace Op in the graph, without modifying the underlying
     # file.
     x = tensor.load(path, 'int32', (False, ), 'c')
     # x ** 2 has been chosen because it will work inplace.
     y = (x**2).sum()
     fn = function([path], y)
     # Call fn() twice, to check that inplace ops do not cause trouble
     assert (fn(self.filename) == (self.data**2).sum()).all()
     assert (fn(self.filename) == (self.data**2).sum()).all()
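The "without modifying the underlying file" claim can be checked directly: after running the compiled function, the array stored on disk should still equal the original data. A short sketch reusing the fixture names from the test above:

fn(self.filename)
# Assumption: self.filename / self.data are the usual fixture of these tests.
on_disk = numpy.load(self.filename)
assert (on_disk == self.data).all()  # the file is untouched despite the inplace square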
Example #9
 def test_hash_and_eq_params_type(self):
     w1 = ParamsType(a1=TensorType('int64', (False, False)),
                     a2=TensorType('int64',
                                   (False, True, False, False, True)),
                     a3=Generic())
     w2 = ParamsType(a1=TensorType('int64', (False, False)),
                     a2=TensorType('int64',
                                   (False, True, False, False, True)),
                     a3=Generic())
     assert w1 == w2
     assert not (w1 != w2)
     assert hash(w1) == hash(w2)
     assert w1.name == w2.name
     # Changing attributes names only.
     w2 = ParamsType(
         a1=TensorType('int64', (False, False)),
         other_name=TensorType(
             'int64',
             (False, True, False, False, True)),  # a2 -> other_name
         a3=Generic())
     assert w1 != w2
     # Changing attributes types only.
     w2 = ParamsType(
         a1=TensorType('int64', (False, False)),
         a2=Generic(),  # changing class
         a3=Generic())
     assert w1 != w2
     # Changing attributes types characteristics only.
     w2 = ParamsType(
         a1=TensorType('int64', (False, True)),  # changing broadcasting
         a2=TensorType('int64', (False, True, False, False, True)),
         a3=Generic())
     assert w1 != w2
Example #10
from __future__ import absolute_import, print_function, division
import theano
import numpy as np
from unittest import TestCase
from theano.gof import Op, COp, Apply
from theano import Generic
from theano.scalar import Scalar
from theano.tensor import TensorType
from theano.gof import ParamsType, Params, EnumList
from theano import tensor
from theano.tests import unittest_tools as utt

tensor_type_0d = TensorType('float64', tuple())
scalar_type = Scalar('float64')
generic_type = Generic()


# A test op to compute `y = a*x^2 + bx + c` for any tensor x, with a, b, c as op params.
class QuadraticOpFunc(Op):
    __props__ = ('a', 'b', 'c')
    params_type = ParamsType(a=tensor_type_0d, b=scalar_type, c=generic_type)

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def make_node(self, x):
        x = tensor.as_tensor_variable(x)
        return Apply(self, [x], [x.type()])
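The snippet stops at make_node. To show how the declared params_type is consumed, here is a hedged sketch of a perform method: when an Op defines params_type, Theano passes the corresponding Params object (built here from the op's a, b and c attributes) as an extra argument, which the Python implementation reads by attribute. The body below is an illustration, not necessarily the original test's implementation.

    def perform(self, node, inputs, output_storage, coefficients):
        # `coefficients` is the Params object built from this op's a, b, c.
        x = inputs[0]
        output_storage[0][0] = (coefficients.a * (x ** 2)
                                + coefficients.b * x + coefficients.c)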
Example #11
 def test_memmap(self):
     path = Variable(Generic())
     x = tensor.load(path, 'int32', (False, ), mmap_mode='c')
     fn = function([path], x)
     assert type(fn(self.filename)) == numpy.core.memmap