Example #1
    def with_linker_inplace(self, linker):
        for xsh, ysh in [((5, 5), (5, 5)),
                         ((5, 5), (1, 5)),
                         ((5, 5), (5, 1)),
                         ((1, 1), (1, 1)),
                         ((2, 3, 4, 5), (2, 3, 4, 5)),
                         ((2, 3, 4, 5), (1, 3, 1, 5)),
                         ((2, 3, 4, 5), (1, 1, 1, 1)),
                         ((), ())]:
            x = TensorType('float64', [(entry == 1) for entry in xsh])('x')
            y = TensorType('float64', [(entry == 1) for entry in ysh])('y')
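            # Elemwise's second argument is the inplace pattern: {0: 0} makes
            # output 0 overwrite input 0; transfer_type(0) gives the scalar
            # Add the dtype of its first input.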
            e = Elemwise(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
            f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
            xv = numpy.asarray(numpy.random.rand(*xsh))
            yv = numpy.asarray(numpy.random.rand(*ysh))
            zv = xv + yv

            f(xv, yv)

            self.assertTrue((xv == zv).all())
            # Test Elemwise.infer_shape.
            # The Shape op doesn't implement c_code, so only check it with
            # the PerformLinker.
            if isinstance(linker, gof.PerformLinker):
                x = TensorType('float64', [(entry == 1) for entry in xsh])('x')
                y = TensorType('float64', [(entry == 1) for entry in ysh])('y')
                e = Elemwise(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
                f = copy(linker).accept(
                    FunctionGraph([x, y], [e.shape])).make_function()
                xv = numpy.asarray(numpy.random.rand(*xsh))
                yv = numpy.asarray(numpy.random.rand(*ysh))
                zv = xv + yv

                f(xv, yv)

                assert xv.shape == zv.shape
Example #2
    def test_ctors(self):

        if 0:
            # when using an implementation that handles scalars with
            # Scalar type
            assert shared(7).type == Scalar('int64')
            assert shared(7.0).type == Scalar('float64')
            assert shared(7, dtype='float64').type == Scalar('float64')

        else:
            if theano.gof.python_int_bitwidth() == 32:
                assert shared(7).type == theano.tensor.iscalar, shared(7).type
            else:
                assert shared(7).type == theano.tensor.lscalar, shared(7).type
            assert shared(7.0).type == theano.tensor.dscalar
            assert shared(numpy.float32(7)).type == theano.tensor.fscalar

        # test tensor constructor
        b = shared(numpy.zeros((5, 5), dtype='int32'))
        assert b.type == TensorType('int32', broadcastable=[False, False])
        b = shared(numpy.random.rand(4, 5))
        assert b.type == TensorType('float64', broadcastable=[False, False])
        b = shared(numpy.random.rand(5, 1, 2))
        assert b.type == TensorType('float64',
                                    broadcastable=[False, False, False])

        assert shared([]).type == generic

        def badfunc():
            shared(7, bad_kw=False)

        self.assertRaises(TypeError, badfunc)
Example #3
    def test_ctors(self):

        if theano.configdefaults.python_int_bitwidth() == 32:
            assert shared(7).type == theano.tensor.iscalar, shared(7).type
        else:
            assert shared(7).type == theano.tensor.lscalar, shared(7).type
        assert shared(7.0).type == theano.tensor.dscalar
        assert shared(np.float32(7)).type == theano.tensor.fscalar

        # test tensor constructor
        b = shared(np.zeros((5, 5), dtype="int32"))
        assert b.type == TensorType("int32", broadcastable=[False, False])
        b = shared(np.random.rand(4, 5))
        assert b.type == TensorType("float64", broadcastable=[False, False])
        b = shared(np.random.rand(5, 1, 2))
        assert b.type == TensorType("float64",
                                    broadcastable=[False, False, False])

        assert shared([]).type == generic

        def badfunc():
            shared(7, bad_kw=False)

        with pytest.raises(TypeError):
            badfunc()
Example #4
 def test_fill_grad(self):
     # Fix bug reported at
     # https://groups.google.com/d/topic/theano-users/nQshB8gUA6k/discussion
     x = TensorType(config.floatX, [0, 1, 0])('x')
     y = TensorType(config.floatX, [0, 1, 0])('y')
     e = tensor.second(x, y)
     theano.grad(e.sum(), y)
Example #5
def FreeVariable(name, shape, dtype='float64'):
    """Create a TensorVariable of the given shape and dtype."""
    shape = np.atleast_1d(shape)
    var = TensorType(str(dtype), shape == 1)(name)
    var.dshape = tuple(shape)
    var.dsize = int(np.prod(shape))
    return var
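
A minimal usage sketch for FreeVariable (the variable name and shape are
illustrative assumptions, not from the original source):

x = FreeVariable('x', (3, 1))
assert x.dshape == (3, 1) and x.dsize == 3
# Axes of extent 1 are marked broadcastable:
assert x.type.broadcastable == (False, True)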
Example #6
 def test_hash_and_eq_params_type(self):
     w1 = ParamsType(a1=TensorType('int64', (False, False)),
                     a2=TensorType('int64',
                                   (False, True, False, False, True)),
                     a3=Generic())
     w2 = ParamsType(a1=TensorType('int64', (False, False)),
                     a2=TensorType('int64',
                                   (False, True, False, False, True)),
                     a3=Generic())
     assert w1 == w2
     assert not (w1 != w2)
     assert hash(w1) == hash(w2)
     assert w1.name == w2.name
     # Changing attribute names only.
     w2 = ParamsType(
         a1=TensorType('int64', (False, False)),
         other_name=TensorType(
             'int64',
             (False, True, False, False, True)),  # a2 -> other_name
         a3=Generic())
     assert w1 != w2
     # Changing attribute types only.
     w2 = ParamsType(
         a1=TensorType('int64', (False, False)),
         a2=Generic(),  # changing class
         a3=Generic())
     assert w1 != w2
     # Changing attribute type characteristics only.
     w2 = ParamsType(
         a1=TensorType('int64', (False, True)),  # changing broadcasting
         a2=TensorType('int64', (False, True, False, False, True)),
         a3=Generic())
     assert w1 != w2
Example #7
    def test_params_type_filtering(self):
        shape_tensor5 = (1, 2, 2, 3, 2)
        size_tensor5 = int(np.prod(shape_tensor5))
        random_tensor = np.random.normal(
            size=size_tensor5).reshape(shape_tensor5)

        w = ParamsType(a1=TensorType('int32', (False, False)),
                       a2=TensorType('float64',
                                     (False, False, False, False, False)),
                       a3=Generic())

        # With a value that does not match the params type.
        o = Params(w,
                   a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                       12]]).astype('int64'),
                   a2=random_tensor.astype('float32'),
                   a3=2000)
        # should fail (o.a1 is not int32, o.a2 is not float64)
        self.assertRaises(TypeError, w.filter, o, True)
        # should fail (o.a1 is not int32, o.a2 is not float64, and downcast is disallowed)
        self.assertRaises(TypeError, w.filter, o, False, False)
        # Should pass.
        w.filter(o, strict=False, allow_downcast=True)

        # With a value that matches the params type.
        o1 = Params(w,
                    a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                        12]]).astype('int32'),
                    a2=random_tensor.astype('float64'),
                    a3=2000)
        # All should pass.
        w.filter(o1, strict=True)
        w.filter(o1, strict=False, allow_downcast=False)
        w.filter(o1, strict=False, allow_downcast=True)

        # Check values_eq and values_eq_approx.
        o2 = Params(w,
                    a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                        12]]).astype('int32'),
                    a2=random_tensor.astype('float64'),
                    a3=2000)
        assert w.values_eq(o1, o2)
        assert w.values_eq_approx(o1, o2)

        # Check value_eq_approx.
        # NB: I don't know exactly which kinds of differences are rejected by
        # values_eq but accepted by values_eq_approx, so I just perturb the
        # float values a little.
        o3 = Params(w,
                    a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                        12]]).astype('int32'),
                    a2=(random_tensor.astype('float32') * 10 / 2.2 *
                        2.19999999999 / 10).astype('float64'),
                    a3=2000.0 - 0.00000000000000001)
        assert w.values_eq_approx(o1, o3)
Example #8
 def test_fill(self):
     if not theano.config.cxx:
         raise SkipTest("G++ not available, so we need to skip this test.")
     x = TensorType('float64', [0, 0])('x')
     y = TensorType('float64', [1, 1])('y')
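     # Second(x, y) returns y; with the {0: 0} inplace pattern the result is
     # written into x, so xv ends up filled with yv's broadcast value.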
     e = Elemwise(scalar.Second(scalar.transfer_type(0)), {0: 0})(x, y)
     f = gof.CLinker().accept(FunctionGraph([x, y], [e])).make_function()
     xv = numpy.ones((5, 5))
     yv = numpy.random.rand(1, 1)
     f(xv, yv)
     assert (xv == yv).all()
Example #9
 def test_weird_strides(self):
     if not theano.config.cxx:
         raise SkipTest("G++ not available, so we need to skip this test.")
     x = TensorType('float64', [0, 0, 0, 0, 0])('x')
     y = TensorType('float64', [0, 0, 0, 0, 0])('y')
     e = Elemwise(scalar.add)(x, y)
     f = gof.CLinker().accept(FunctionGraph([x, y], [e])).make_function()
     xv = numpy.random.rand(2, 2, 2, 2, 2)
     yv = numpy.random.rand(2, 2, 2, 2, 2).transpose(4, 0, 3, 1, 2)
     zv = xv + yv
     assert (f(xv, yv) == zv).all()
Example #10
def load_random_data_set(data_settings,
                         batchsize,
                         in_time,
                         in_channels,
                         in_width,
                         in_height,
                         round=None):
    cache_file = 'cache/set%s.p' % round
    if round is not None and os.path.isfile(cache_file):
        dataset = pickle.load(open(cache_file, "rb"))
        return dataset

    files_to_use = np.random.randint(len(data_settings['files']),
                                     size=batchsize)

    file_counts = {}
    for i in files_to_use:
        if data_settings['files'][i] in file_counts.keys():
            file_counts[data_settings['files'][i]] += 1
        else:
            file_counts[data_settings['files'][i]] = 1

    labels = [-1] * batchsize

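    # NumPy resolves a dtype from any object with a `.dtype` attribute, so the
    # TensorType below just stands in for theano.config.floatX.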
    segments = np.zeros((batchsize, in_time, in_channels, in_height, in_width),
                        dtype=TensorType(theano.config.floatX, (False, ) * 5))

    counter = 0
    for i in file_counts.keys():
        print("Loading %d segments from %s which has %d frames" %
              (file_counts[i], i, data_settings['inverse_frame_map'][i]))
        frame_numbers = np.random.randint(
            data_settings['inverse_frame_map'][i] - in_time,
            size=file_counts[i])
        for j in frame_numbers:
            print("Chose to use frame number %d" % j)
            num_read, video_frames = read_frames_from_video(
                i, (in_width, in_height),
                in_channels,
                read_range=(j, j + in_time - 1))
            labels[counter] = data_settings['inverse_label_map'][i]
            segments[counter, :, :, :, :] = video_frames
            counter += 1

    if round is not None:
        labels = np.asarray(labels, dtype=TensorType(int_type, (False, )))
        pickle.dump((labels, segments), open(cache_file, "wb"))

    return (labels, segments)
Example #11
    def make_node(self, img, topgrad, shape=None):
        img = as_tensor_variable(img)
        topgrad = as_tensor_variable(topgrad)
        img, topgrad = self.as_common_dtype(img, topgrad)
        if img.type.ndim != 5:
            raise TypeError('img must be 5D tensor')
        if topgrad.type.ndim != 5:
            raise TypeError('topgrad must be 5D tensor')
        if self.subsample != (1, 1, 1) or self.border_mode == "half":
            if shape is None:
                raise ValueError(
                    'shape must be given if subsample != (1, 1, 1)'
                    ' or border_mode == "half"')
            height_width_depth = [
                as_tensor_variable(shape[0]).astype('int64'),
                as_tensor_variable(shape[1]).astype('int64'),
                as_tensor_variable(shape[2]).astype('int64')
            ]
        else:
            height_width_depth = []

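        # Only the two channel axes of the output can be broadcastable; the
        # three spatial axes never are.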
        broadcastable = [
            topgrad.type.broadcastable[1], img.type.broadcastable[1], False,
            False, False
        ]
        dtype = img.type.dtype
        return Apply(self, [img, topgrad] + height_width_depth,
                     [TensorType(dtype, broadcastable)()])
Example #12
    def make_node(self, kern, topgrad, shape=None):
        kern = as_tensor_variable(kern)
        topgrad = as_tensor_variable(topgrad)
        kern, topgrad = self.as_common_dtype(kern, topgrad)
        if kern.type.ndim != 5:
            raise TypeError('kern must be 5D tensor')
        if topgrad.type.ndim != 5:
            raise TypeError('topgrad must be 5D tensor')
        if self.subsample != (1, 1, 1) and shape is None:
            raise ValueError('shape must be given if subsample != (1, 1, 1)')
        if self.subsample != (1, 1, 1):
            height_width_depth = [
                as_tensor_variable(shape[0]).astype('int64'),
                as_tensor_variable(shape[1]).astype('int64'),
                as_tensor_variable(shape[2]).astype('int64')
            ]
        else:
            height_width_depth = []

        broadcastable = [
            topgrad.type.broadcastable[0], kern.type.broadcastable[1], False,
            False, False
        ]
        dtype = kern.type.dtype
        return Apply(self, [kern, topgrad] + height_width_depth,
                     [TensorType(dtype, broadcastable)()])
Example #13
    def make_node(self, kern, topgrad, shape=None):
        kern = as_tensor_variable(kern)
        topgrad = as_tensor_variable(topgrad)
        kern, topgrad = self.as_common_dtype(kern, topgrad)
        if self.unshared is True:
            if kern.type.ndim != 6:
                raise TypeError("kern must be 6D tensor")
        else:
            if kern.type.ndim != 4:
                raise TypeError("kern must be 4D tensor")
        if topgrad.type.ndim != 4:
            raise TypeError("topgrad must be 4D tensor")
        if shape is None:
            if self.subsample != (1, 1):
                raise ValueError("shape must be given if subsample != (1, 1)")
            height_width = []
        else:
            height_width = [
                as_tensor_variable(shape[0]).astype("int64"),
                as_tensor_variable(shape[1]).astype("int64"),
            ]

        if self.num_groups > 1:
            broadcastable = [topgrad.type.broadcastable[0], False, False, False]
        else:
            broadcastable = [
                topgrad.type.broadcastable[0],
                kern.type.broadcastable[-3],
                False,
                False,
            ]
        dtype = kern.type.dtype
        return Apply(
            self, [kern, topgrad] + height_width, [TensorType(dtype, broadcastable)()]
        )
Example #14
 def test_basic(self):
     for type1 in [
             "uint8",
             "uint16",
             "uint32",
             "uint64",
             "int8",
             "int16",
             "int32",
             "int64",
             "float32",
             "float64",
     ]:
         x = TensorType(dtype=type1, broadcastable=(False, ))()
         for type2, converter in zip(
             ["int8", "int16", "int32", "int64", "float32", "float64"],
             [
                 _convert_to_int8,
                 _convert_to_int16,
                 _convert_to_int32,
                 _convert_to_int64,
                 _convert_to_float32,
                 _convert_to_float64,
             ],
         ):
             y = converter(x)
             f = function([In(x, strict=True)], y)
             a = np.arange(10, dtype=type1)
             b = f(a)
             assert np.all(b == np.arange(10, dtype=type2))
Example #15
    def test_infer_shape(self):

        for s_left, s_right in [((5, 6), (5, 6)), ((5, 6), (5, 1)),
                                ((5, 6), (1, 6)), ((5, 1), (5, 6)),
                                ((1, 6), (5, 6)), ((2, 3, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 5), (2, 3, 1, 5)),
                                ((2, 3, 4, 5), (1, 3, 4, 5)),
                                ((2, 1, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 1), (2, 3, 4, 5))]:
            dtype = theano.config.floatX
            t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
            t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
            t_left_val = numpy.zeros(s_left, dtype=dtype)
            t_right_val = numpy.zeros(s_right, dtype=dtype)
            self._compile_and_check([t_left, t_right],
                                    [Elemwise(scalar.add)(t_left, t_right)],
                                    [t_left_val, t_right_val], Elemwise)
Example #16
 def test_same_inputs(self):
     if not theano.config.cxx:
         raise SkipTest("G++ not available, so we need to skip this test.")
     x = TensorType('float64', [0, 0])('x')
     e = Elemwise(scalar.add)(x, x)
     f = gof.CLinker().accept(FunctionGraph([x], [e])).make_function()
     xv = numpy.random.rand(2, 2)
     zv = xv + xv
     assert (f(xv) == zv).all()
Example #17
    def make_theano_batch(self, name=None, dtype=None):
        if dtype is None:
            dtype = config.floatX

        broadcastable = [False] * 4
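        # The channel axis is broadcastable only when there is a single channel.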
        broadcastable[self.axes.index('c')] = self.num_channels == 1
        broadcastable = tuple(broadcastable)

        return TensorType(dtype=dtype, broadcastable=broadcastable)(name=name)
Example #18
    def test_graphstructures_1(self):

        x = T.dmatrix('x')
        y = T.dmatrix('y')
        z = x + y

        x = T.matrix('x')
        y = T.matrix('y')
        z = T.matrix('z')

        # create 2 Variables (one for 'e', one intermediate for y*z)
        # create 2 Apply instances (one for '+', one for '*')
        e = x + y * z

        from theano.tensor import add, mul, Apply, Variable, TensorType

        # Instantiate a type that represents a matrix of doubles
        float64_matrix = TensorType(dtype='float64',               # double
                                    broadcastable=(False, False))  # matrix

        # We make the Variable instances we need.
        x = Variable(type=float64_matrix, name='x')
        y = Variable(type=float64_matrix, name='y')
        z = Variable(type=float64_matrix, name='z')

        # This is the Variable that we want to symbolically represent y*z
        mul_variable = Variable(type=float64_matrix)
        assert mul_variable.owner is None

        # Instantiate a symbolic multiplication
        node_mul = Apply(op=mul,
                         inputs=[y, z],
                         outputs=[mul_variable])
        # Fields 'owner' and 'index' are set by Apply
        assert mul_variable.owner is node_mul
        # 'index' is the position of mul_variable in node_mul's outputs
        assert mul_variable.index == 0

        # This is the Variable that we want to symbolically represent x+(y*z)
        add_variable = Variable(type=float64_matrix)
        assert add_variable.owner is None

        # Instantiate a symbolic addition
        node_add = Apply(op=add,
                         inputs=[x, mul_variable],
                         outputs=[add_variable])
        # Fields 'owner' and 'index' are set by Apply
        assert add_variable.owner is node_add
        assert add_variable.index == 0

        e = add_variable

        # We have access to x, y and z through pointers
        assert e.owner.inputs[0] is x
        assert e.owner.inputs[1] is mul_variable
        assert e.owner.inputs[1].owner.inputs[0] is y
        assert e.owner.inputs[1].owner.inputs[1] is z
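
The hand-built graph compiles like any other Theano graph. A short sketch
(assuming theano and numpy are imported as in the surrounding examples):

f = theano.function([x, y, z], e)
ones = numpy.ones((2, 2))
# e computes x + y * z, so all-ones inputs give a matrix of 2s.
assert (f(ones, ones, ones) == 2).all()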
Example #19
 def test_infer_shape(self, dtype=None, pre_scalar_op=None):
     if dtype is None:
         dtype = theano.config.floatX
     for xsh, tosum in self.cases:
         x = TensorType(dtype, [(entry == 1) for entry in xsh])('x')
         if pre_scalar_op is not None:
             x = pre_scalar_op(x)
         if tosum is None:
             tosum = range(len(xsh))
         xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype)
         d = {}
         if pre_scalar_op is not None:
             xv = x.eval({x.owner.inputs[0]: xv})
             d = {pre_scalar_op: pre_scalar_op}
         self._compile_and_check([x],
                                 [self.op(scalar.add, axis=tosum, *d)(x)],
                                 [xv], self.op,
                                 ["local_cut_useless_reduce"],
                                 warn=0 not in xsh)
Example #20
 def test_infer_shape(self, dtype=None, pre_scalar_op=None):
     if dtype is None:
         dtype = theano.config.floatX
     for xsh, tosum in self.cases:
         x = TensorType(dtype, [(entry == 1) for entry in xsh])('x')
         if pre_scalar_op is not None:
             x = pre_scalar_op(x)
         if tosum is None:
             tosum = range(len(xsh))
         xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype)
         d = {}
         if pre_scalar_op is not None:
             xv = x.eval({x.owner.inputs[0]: xv})
             d = {pre_scalar_op: pre_scalar_op}
         self._compile_and_check([x],
                                 [self.op(scalar.add, axis=tosum, *d)(x)],
                                 [xv],
                                 self.op, ["local_cut_useless_reduce"],
                                 warn=0 not in xsh)
Example #21
 def test_infer_shape(self, dtype=None):
     if dtype is None:
         dtype = theano.config.floatX
     for xsh, tosum in self.cases:
         x = TensorType(dtype, [(entry == 1) for entry in xsh])('x')
         if tosum is None:
             tosum = range(len(xsh))
         xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype)
         self._compile_and_check([x], [self.op(scalar.add, axis=tosum)(x)],
                                 [xv],
                                 self.op, ["local_cut_useless_reduce"],
                                 warn=0 not in xsh)
Example #22
    def make_node(self, img, kern):
        img = as_tensor_variable(img)
        kern = as_tensor_variable(kern)
        if img.type.ndim != 4:
            raise TypeError('img must be 4D tensor')
        if kern.type.ndim != 4:
            raise TypeError('kern must be 4D tensor')

        broadcastable = [img.type.broadcastable[0], kern.type.broadcastable[0],
                         False, False]
        dtype = img.type.dtype
        return Apply(self, [img, kern], [TensorType(dtype, broadcastable)()])
Example #23
    def make_theano_batch(self, name=None, dtype=None, batch_size=None):
        if dtype is None:
            dtype = config.floatX

        broadcastable = [False] * 5
        broadcastable[self.axes.index('c')] = (self.num_channels == 1)
        broadcastable[self.axes.index('b')] = True
        broadcastable = tuple(broadcastable)

        rval = TensorType(dtype=dtype, broadcastable=broadcastable)(name=name)
        if config.compute_test_value != 'off':
            rval.tag.test_value = self.get_origin_batch(n=1)
        return rval
Example #24
def test_cdata():
    i = TensorType('float32', (False, ))()
    c = ProdOp()(i)
    i2 = GetOp()(c)

    # This should be a passthrough function for vectors
    f = theano.function([i], i2)

    v = numpy.random.randn(9).astype('float32')

    v2 = f(v)
    assert (v2 == v).all()
Example #25
    def test_convert_to_complex(self):
        val64 = np.ones(3, dtype="complex64") + 0.5j
        val128 = np.ones(3, dtype="complex128") + 0.5j

        vec64 = TensorType("complex64", (False, ))()
        vec128 = TensorType("complex128", (False, ))()

        f = function([vec64], basic._convert_to_complex128(vec64))
        # we need to compare with the same type.
        assert vec64.type.values_eq_approx(val128, f(val64))

        f = function([vec128], basic._convert_to_complex128(vec128))
        assert vec64.type.values_eq_approx(val128, f(val128))

        f = function([vec64], basic._convert_to_complex64(vec64))
        assert vec64.type.values_eq_approx(val64, f(val64))

        f = function([vec128], basic._convert_to_complex64(vec128))
        assert vec128.type.values_eq_approx(val64, f(val128))

        # upcasting to complex128
        for t in ["int8", "int16", "int32", "int64", "float32", "float64"]:
            a = theano.shared(np.ones(3, dtype=t))
            b = theano.shared(np.ones(3, dtype="complex128"))
            f = function([], basic._convert_to_complex128(a))
            assert a.type.values_eq_approx(b.get_value(), f())

        # upcasting to complex64
        for t in ["int8", "int16", "int32", "int64", "float32"]:
            a = theano.shared(np.ones(3, dtype=t))
            b = theano.shared(np.ones(3, dtype="complex64"))
            f = function([], basic._convert_to_complex64(a))
            assert a.type.values_eq_approx(b.get_value(), f())

        # downcast to complex64
        for t in ["float64"]:
            a = theano.shared(np.ones(3, dtype=t))
            b = theano.shared(np.ones(3, dtype="complex64"))
            f = function([], basic._convert_to_complex64(a))
            assert a.type.values_eq_approx(b.get_value(), f())
Example #26
    def with_linker(self, linker):
        for xsh, shuffle, zsh in [((2, 3), (1, 'x', 0), (3, 1, 2)),
                                  ((1, 2, 3), (1, 2), (2, 3)),
                                  ((1, 2, 1, 3), (1, 3), (2, 3)),
                                  ((2, 3, 4), (2, 1, 0), (4, 3, 2)),
                                  ((2, 3, 4), ('x', 2, 1, 0, 'x'), (1, 4, 3, 2,
                                                                    1)),
                                  ((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)),
                                  ((1, 1, 4), (1, 2), (1, 4)),
                                  ((1, 1, 1), (), ()),
                                  ((1, ), ('x', 'x'), (1, 1))]:
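            # Axes whose extent is 1 are marked broadcastable.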
            ib = [(entry == 1) for entry in xsh]
            x = TensorType('float64', ib)('x')
            e = self.op(ib, shuffle)(x)
            f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
            assert f(numpy.ones(xsh)).shape == zsh
            # Test that DimShuffle.infer_shape works correctly
            x = TensorType('float64', ib)('x')
            e = self.op(ib, shuffle)(x)
            f = copy(linker).accept(FunctionGraph([x],
                                                  [e.shape])).make_function()
            assert (f(numpy.ones(xsh)) == zsh).all()

        # Test when we drop an axis that is not broadcastable
        ib = [False, True, False]
        x = TensorType('float64', ib)('x')
        self.assertRaises(ValueError, self.op, ib, shuffle)

        # Test when we drop an axis that doesn't have shape 1
        ib = [True, True, False]
        x = TensorType('float64', ib)('x')
        e = self.op(ib, (1, 2))(x)
        f = copy(linker).accept(FunctionGraph([x], [e.shape])).make_function()
        self.assertRaises(TypeError, f, numpy.ones((2, 1, 4)))

        # Test that we can't use a dimension multiple times
        xsh, shuffle, zsh = ((1, 1, 4), (0, 1, 2, 0), (1, 4))
        ib = [False, True, False]
        x = TensorType('float64', ib)('x')
        self.assertRaises(ValueError, DimShuffle, ib, shuffle)
Example #27
    def test_infer_shape(self):

        for xsh, shuffle in [((2, 3), (1, 'x', 0)), ((1, 2, 3), (1, 2)),
                             ((1, 2, 1, 3), (1, 3)), ((2, 3, 4), (2, 1, 0)),
                             ((2, 3, 4), ('x', 2, 1, 0, 'x')),
                             ((1, 4, 3, 2, 1), (3, 2, 1)), ((1, 1, 4), (1, 2)),
                             ((1, 1, 1), ()), ((1, ), ('x', 'x'))]:
            ib = [(entry == 1) for entry in xsh]
            adtens = TensorType('float64', ib)('x')
            adtens_val = numpy.ones(xsh)
            self._compile_and_check([adtens],
                                    [DimShuffle(ib, shuffle)(adtens)],
                                    [adtens_val], DimShuffle)
Example #28
 def __init__(self, model):
     raise NotImplementedError("Repeating class not implemented yet!")
     # make sure the input model to repeat is a Model instance
     assert isinstance(
         model, Model
     ), "The initial model provided was type %s, not a Model." % str(
         type(model))
     self.model = model
     # make this input one dimension more than the provided Model's input (since we are repeating over the
     # first dimension)
     model_input = raise_to_list(self.model.get_inputs())[0]
     self.input = TensorType(model_input.dtype,
                             (False, ) * (model_input.ndim + 1))
Example #29
 def test_infer_shape(self):
     for xsh, tosum in [((5, 6), None), ((5, 6), (0, 1)), ((5, 6), (0, )),
                        ((5, 6), (1, )), ((5, 6), (-1, )), ((5, 6), (-2, )),
                        ((2, 3, 4, 5), (0, 1, 3)), ((2, 3, 4, 5), (-2, -3)),
                        ((5, 0), None), ((5, 0), (0, )), ((5, 0), (1, )),
                        ((5, 6), ()), ((5, 0), ()), ((), None), ((), ())]:
         dtype = theano.config.floatX
         x = TensorType(dtype, [(entry == 1) for entry in xsh])('x')
         if tosum is None:
             tosum = range(len(xsh))
         xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype)
         self._compile_and_check([x], [self.op(scalar.add, axis=tosum)(x)],
                                 [xv], self.op,
                                 ["local_cut_useless_reduce"])
Example #30
def test_cdata():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    i = TensorType('float32', (False, ))()
    c = ProdOp()(i)
    i2 = GetOp()(c)

    # This should be a passthrough function for vectors
    f = theano.function([i], i2)

    v = numpy.random.randn(9).astype('float32')

    v2 = f(v)
    assert (v2 == v).all()
Example #31
def test_cdata():
    i = TensorType("float32", (False, ))()
    c = ProdOp()(i)
    i2 = GetOp()(c)
    mode = None
    if theano.config.mode == "FAST_COMPILE":
        mode = "FAST_RUN"

    # This should be a passthrough function for vectors
    f = theano.function([i], i2, mode=mode)

    v = np.random.randn(9).astype("float32")

    v2 = f(v)
    assert (v2 == v).all()
Example #32
    def _make_batch(self, is_symbolic, batch_size, dtype, name):

        # if 'b' not in self.axes:
        #     raise ValueError("This format has no batch ('b') axis.")

        if is_symbolic:
            assert_is(batch_size, None)
            # shape = list(self.shape)

            # # ok if batch_size is None
            # shape[self.axes.index('b')] = batch_size

            broadcastable = [False] * len(self.axes)
            # broadcastable = tuple(size == 1 for size in shape)

            # broadcastable = [False] * len(self.axes)
            # if 'f' in self.axes:
            #     f_index = self.axes.index('f')
            #     broadcastable[f_index] = (self.shape[f_index] == 1)

            # if 'b' in self.axes:
            #     broadcastable[self.axes.index('b')] = (batch_size == 1)

            # broadcastable = tuple(broadcastable)

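            # make_variable is what TensorType.__call__ delegates to; it
            # returns a fresh symbolic variable of this type.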
            tensor_type = TensorType(dtype=dtype, broadcastable=broadcastable)
            result = tensor_type.make_variable(name=name)

            if theano.config.compute_test_value != 'off':
                test_batch_size = (None if 'b' not in self.axes
                                   else formats.test_batch_size)
                result.tag.test_value = self.make_batch(
                    is_symbolic=False,
                    batch_size=test_batch_size,
                    dtype=dtype)

                # if batch_size is None:
                #     raise ValueError("When theano.config.compute_test_values "
                #                      "is not 'off', you must supply a "
                #                      "batch_size argument even when making"
                #                      "symbolic batches.")
                # else:
                #     result.tag.test_value = \
                #         self.make_batch(is_symbolic=False,
                #                         batch_size=batch_size,
                #                         dtype=dtype)



                # Don't understand this, from
                # pylearn2.space.ConvSpace2D.make_theano_batch, but keep it
                # here in case it becomes clear later:

                # if batch_size == 1:
                #     n = 1
                # else:
                #     batch_size
                #     # TODO: try to extract constant scalar value
                #     #       from batch_size
                #     n = 4
                # rval.tag.test_value = self.get_origin_batch(batch_size=n,
                #                                             dtype=dtype)
            return result

            # This is what pylearn2.space.VectorSpace does, for efficiency
            # reasons, but IIRC people on the mailing list often complained of
            # breakages caused by the batch type changing from tensor.row to
            # tensor.matrix depending on the value of batch_size. Seems like
            # any efficiency gains may be more trouble than they're worth.

            # if batch_size == 1:
            #     return theano.tensor.row(name=name, dtype=dtype)
            # else:
            #     return theano.tensor.matrix(name=name, dtype=dtype)
        else:  # i.e. is_symbolic == False
            if batch_size is None:
                assert_not_in('b', self.axes)
            else:
                assert_in('b',
                          self.axes,
                          ("batch_size argument provided ({}), but "
                           "this format has no batch "
                           "('b') axis.").format(batch_size))

            shape = list(self.shape)
            shape[self.axes.index('b')] = batch_size

            dtype = dtype if dtype is not None else self.dtype
            if dtype is None:
                raise ValueError("When self.dtype is None, you must provide a "
                                 "dtype argument to make_batch")

            return numpy.zeros(shape, dtype)
Example #33
                    if isinstance(exc, type(ref_e)):
                        return
                    else:
                        err_msg = ("Test %s::%s: exception raised during test "
                                   "call was not the same as the reference "
                                   "call (got: %s, expected %s)") % \
                                   (self.gpu_op, testname, type(exc),
                                    type(ref_e))
                        exc.args += (err_msg,)
                        raise

            for i, (variable, expected) in \
                    enumerate(izip(variables, expecteds)):
                if variable.dtype != expected.dtype or \
                        variable.shape != expected.shape or \
                        not TensorType.values_eq_approx(variable,
                                                        expected):
                    self.fail(("Test %s::%s: Output %s gave the wrong "
                               "value. With inputs %s, expected %s "
                               "(dtype %s), got %s (dtype %s).") % (
                            self.op, testname, i, inputs, expected,
                            expected.dtype, variable, variable.dtype))

            for description, check in self.checks.items():
                if not check(inputs, variables):
                    self.fail(("Test %s::%s: Failed check: %s "
                               "(inputs were %s, ouputs were %s)") %
                              (self.op, testname, description,
                               inputs, variables))

    Checker.__name__ = name
    return Checker
Example #34
        def run_case(self, testname, inputs):
            inputs_ref = [theano.shared(inp) for inp in inputs]
            inputs_tst = [theano.shared(inp) for inp in inputs]

            try:
                node_ref = safe_make_node(self.op, *inputs_ref)
                node_tst = safe_make_node(self.op, *inputs_tst)
            except Exception as exc:
                err_msg = ("Test %s::%s: Error occured while making "
                           "a node with inputs %s") % (self.gpu_op, testname,
                                                       inputs)
                exc.args += (err_msg,)
                raise

            try:
                f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
                f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
            except Exception as exc:
                err_msg = ("Test %s::%s: Error occured while trying to "
                           "make a Function") % (self.gpu_op, testname)
                exc.args += (err_msg,)
                raise

            self.assertFunctionContains1(f_tst, self.gpu_op)

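            # Run the reference function first and remember any exception, so
            # a failure in the GPU run below can be matched against it.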
            ref_e = None
            try:
                expecteds = f_ref()
            except Exception as exc:
                ref_e = exc

            try:
                variables = f_tst()
            except Exception as exc:
                if ref_e is None:
                    err_msg = ("Test %s::%s: exception when calling the "
                               "Function") % (self.gpu_op, testname)
                    exc.args += (err_msg,)
                    raise
                else:
                    # If we raised an exception of the same type, we're good.
                    if isinstance(exc, type(ref_e)):
                        return
                    else:
                        err_msg = ("Test %s::%s: exception raised during test "
                                   "call was not the same as the reference "
                                   "call (got: %s, expected %s)") % \
                                   (self.gpu_op, testname, type(exc),
                                    type(ref_e))
                        exc.args += (err_msg,)
                        raise

            for i, (variable, expected) in \
                    enumerate(izip(variables, expecteds)):
                if variable.dtype != expected.dtype or \
                        variable.shape != expected.shape or \
                        not TensorType.values_eq_approx(variable,
                                                        expected):
                    self.fail(("Test %s::%s: Output %s gave the wrong "
                               "value. With inputs %s, expected %s "
                               "(dtype %s), got %s (dtype %s).") % (
                            self.op, testname, i, inputs, expected,
                            expected.dtype, variable, variable.dtype))

            for description, check in self.checks.items():
                if not check(inputs, variables):
                    self.fail(("Test %s::%s: Failed check: %s "
                               "(inputs were %s, ouputs were %s)") %
                              (self.op, testname, description,
                               inputs, variables))