Example #1
    def test_bad_shape(self):
        # Test that at run time we raise an exception when the shape
        # is not the one specified
        specify_shape = SpecifyShape()

        x = vector()
        xval = np.random.rand(2).astype(config.floatX)
        f = theano.function([x], specify_shape(x, [2]), mode=self.mode)
        f(xval)
        xval = np.random.rand(3).astype(config.floatX)
        with pytest.raises(AssertionError):
            f(xval)

        assert isinstance(
            [n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0]
            .inputs[0]
            .type,
            self.input_type,
        )

        x = matrix()
        xval = np.random.rand(2, 3).astype(config.floatX)
        f = theano.function([x], specify_shape(x, [2, 3]), mode=self.mode)
        assert isinstance(
            [n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0]
            .inputs[0]
            .type,
            self.input_type,
        )
        f(xval)
        for shape_ in [(1, 3), (2, 2), (5, 5)]:
            xval = np.random.rand(*shape_).astype(config.floatX)
            with pytest.raises(AssertionError):
                f(xval)
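For reference, the same op is exposed as `theano.tensor.specify_shape`; a minimal standalone sketch of the behavior the test exercises (assuming that helper; the variable names are illustrative):

import numpy as np
import theano
import theano.tensor as tt

x = tt.vector("x")
# specify_shape attaches a run-time shape assertion; the output equals x.
y = tt.specify_shape(x, (2,))
f = theano.function([x], y)

f(np.zeros(2, dtype=theano.config.floatX))    # passes
# f(np.zeros(3, dtype=theano.config.floatX)) raises an AssertionError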
Example #2
def check_force_gemv_init():
    """
    Test issue 1569.

    Namely, when evaluating

        beta*aa + alpha*dot(xx, yy)

    where we set aa = beta = zeros of the correct dimensions, we do not
    actually set aa = zeros and instead let the BLAS perform beta*aa with
    uninitialized memory, for speed. Occasionally the memory contains values
    that are equivalent to NaN, in which case the product beta*aa contains
    NaNs for correctly implemented BLAS libraries. In this situation, since
    we are the ones introducing the NaNs, we need to test whether the BLAS
    performs correctly. If it *does*, i.e. it actually performs the
    multiplication beta*aa, which puts NaNs in the result, then we need to
    initialize the memory to zeros ourselves.
    """
    if check_force_gemv_init._force_init_beta is None:
        tv = theano.config.compute_test_value
        tvo = theano.config.compute_test_value_opt
        theano.config.compute_test_value = 'off'
        theano.config.compute_test_value_opt = 'off'
        try:
            aa = T.vector('aa')
            yy = T.vector('yy')
            xx = T.matrix('xx')
            f = theano.function(
                [aa, yy, xx],
                gemv_no_inplace(aa, 1., xx, yy, 0.),
                theano.compile.Mode(optimizer='fast_compile').excluding('gpu',
                                                                        'gpuarray'),
                profile=False
                )
        finally:
            theano.config.compute_test_value = tv
            theano.config.compute_test_value_opt = tvo

        # Here we introduce NaNs into the data; if they are returned by the
        # BLAS, then we want gemv_c_code to initialize the memory to 0 so
        # that we don't inadvertently introduce NaNs into the user's data.
        aa_data = numpy.array(
            float('NaN')*numpy.ones((2,)),
            dtype=theano.config.floatX
        )
        yy_data = numpy.array(
            numpy.ones((2,))*2,
            dtype=theano.config.floatX
        )
        xx_data = numpy.array(
            numpy.ones((2, 2)),
            dtype=theano.config.floatX
        )
        zz = f(aa_data, yy_data, xx_data)

        check_force_gemv_init._force_init_beta = numpy.isnan(zz).any()

    return check_force_gemv_init._force_init_beta
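Note the memoization pattern: the probe's result is cached on a function attribute (seeded to None at module level in Theano) so the compilation and BLAS check run at most once per process. A minimal sketch of the same pattern, with a hypothetical run_probe stand-in:

def needs_workaround():
    # The (potentially expensive) probe runs only on the first call;
    # every later call returns the cached result.
    if needs_workaround._cached is None:
        needs_workaround._cached = run_probe()
    return needs_workaround._cached

def run_probe():
    # Stand-in for the real BLAS probe above.
    return False

needs_workaround._cached = None  # seed the cache before the first call

assert needs_workaround() is False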
Example #3
    def test_local_hard_sigmoid(self):
        x = tensor.matrix('x')
        s = sigmoid(x)

        mode = self.get_mode('local_hard_sigmoid')
        f = theano.function([x], s, mode=mode)
        topo = f.maker.fgraph.toposort()
        assert topo[0].op == sigmoid
        assert len(topo) == 1

        mode = self.get_mode().including('local_hard_sigmoid')
        f = theano.function([x], s, mode=mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) > 1
        assert not any(n.op == sigmoid for n in topo)
        f([[-50, -10, -4, -1, 0, 1, 4, 10, 50]])
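For context, local_hard_sigmoid replaces the exact sigmoid with a piecewise-linear clip-based approximation; a NumPy sketch of what that approximation computes (the slope 0.2 and shift 0.5 are Theano's usual constants, stated here as an assumption):

import numpy as np

def hard_sigmoid(x, slope=0.2, shift=0.5):
    # Linear around 0, saturating at 0 and 1 for |x| >= shift / slope = 2.5.
    return np.clip(x * slope + shift, 0.0, 1.0)

x = np.array([-50, -10, -4, -1, 0, 1, 4, 10, 50], dtype="float64")
exact = 1.0 / (1.0 + np.exp(-x))
print(np.abs(exact - hard_sigmoid(x)).max())  # small but nonzero error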
Example #4
    def test_local_ultra_fast_sigmoid(self):
        x = tensor.matrix('x')
        s = sigmoid(x)

        mode = self.get_mode('local_ultra_fast_sigmoid')
        f = theano.function([x], s, mode=mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert topo[0].op == sigmoid

        mode = self.get_mode().including('local_ultra_fast_sigmoid')
        f = theano.function([x], s, mode=mode)
        topo = f.maker.fgraph.toposort()
        assert topo[0].op == ultra_fast_sigmoid
        assert len(topo) == 1
        f([[-50, -10, -4, -1, 0, 1, 4, 10, 50]])
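Outside of the test suite, the same speed-over-accuracy rewrite can be requested on an ordinary compiled function by extending the default compilation mode; a minimal sketch (the optimizer name is taken from the test above):

import theano
import theano.tensor as tt
from theano.tensor.nnet import sigmoid

x = tt.matrix("x")
mode = theano.compile.get_default_mode().including("local_ultra_fast_sigmoid")
f = theano.function([x], sigmoid(x), mode=mode)
print(f([[-4.0, 0.0, 4.0]]))  # approximate, not exact, sigmoid values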
Example #5
    def test_local_ultra_fast_sigmoid(self):
        x = tensor.matrix("x")
        s = sigmoid(x)

        mode = self.get_mode("local_ultra_fast_sigmoid")
        f = theano.function([x], s, mode=mode)
        assert check_stack_trace(f, ops_to_check=sigmoid)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert topo[0].op == sigmoid

        mode = self.get_mode().including("local_ultra_fast_sigmoid")
        f = theano.function([x], s, mode=mode)
        assert check_stack_trace(f, ops_to_check=ultra_fast_sigmoid)
        topo = f.maker.fgraph.toposort()
        assert topo[0].op == ultra_fast_sigmoid
        assert len(topo) == 1
        f([[-50, -10, -4, -1, 0, 1, 4, 10, 50]])
Example #6
    def test_bad_number_of_shape(self):
        # Test that an error is raised when the wrong number of dimensions
        # is provided
        specify_shape = SpecifyShape()

        x = vector()
        shape_vec = ivector()
        xval = np.random.rand(2).astype(config.floatX)
        with pytest.raises(AssertionError):
            specify_shape(x, [])
        with pytest.raises(AssertionError):
            specify_shape(x, [2, 2])

        f = theano.function([x, shape_vec],
                            specify_shape(x, shape_vec),
                            mode=self.mode)
        assert isinstance(
            [
                n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, SpecifyShape)
            ][0].inputs[0].type,
            self.input_type,
        )
        with pytest.raises(AssertionError):
            f(xval, [])
        with pytest.raises(AssertionError):
            f(xval, [2, 2])

        x = matrix()
        xval = np.random.rand(2, 3).astype(config.floatX)
        for shape_ in [(), (1,), (2, 3, 4)]:
            with pytest.raises(AssertionError):
                specify_shape(x, shape_)
            f = theano.function([x, shape_vec],
                                specify_shape(x, shape_vec),
                                mode=self.mode)
            assert isinstance(
                [
                    n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, SpecifyShape)
                ][0].inputs[0].type,
                self.input_type,
            )
            with pytest.raises(AssertionError):
                f(xval, shape_)
Example #7
    def test_grad_log1msigm(self):
        # At some point, this returned nan, because (1 - sigm(x)) was
        # on both the numerator and the denominator of a fraction,
        # but the two nodes in question had not been merged.
        x = tensor.matrix('x')
        lr = tensor.scalar('lr')

        s = sigmoid(x)
        l = T.log(1 - s)
        c = l.mean()
        ux = x - lr * theano.grad(c, x)

        # Before the optimization, inf and NaN will be produced in the graph,
        # and DebugMode will complain. Everything is fine afterwards.
        mode = self.get_mode()
        if not isinstance(mode, theano.compile.DebugMode):
            f = theano.function([x, lr], ux, mode=mode)
            ux_v = f([[50]], 0.1)
            assert not np.isnan(ux_v)
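The instability this test guards against is easy to reproduce: for large x, 1 - sigmoid(x) underflows to 0 and the naive log returns -inf, while the algebraically equivalent -softplus(x) form (which Theano's rewrite is meant to produce) stays finite. A NumPy sketch:

import numpy as np

x = 50.0
naive = np.log(1.0 - 1.0 / (1.0 + np.exp(-x)))  # 1 - sigmoid(50) underflows -> log(0) = -inf
stable = -np.logaddexp(0.0, x)                  # log(1 - sigmoid(x)) = -softplus(x), about -50 here
print(naive, stable)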
Example #8
    def test_local_hard_sigmoid(self):
        x = tensor.matrix('x')
        s = sigmoid(x)

        mode = self.get_mode('local_hard_sigmoid')
        f = theano.function([x], s, mode=mode)
        assert check_stack_trace(f, ops_to_check=sigmoid)
        topo = f.maker.fgraph.toposort()
        assert topo[0].op == sigmoid
        assert len(topo) == 1

        mode = self.get_mode().including('local_hard_sigmoid')
        f = theano.function([x], s, mode=mode)
        topo = f.maker.fgraph.toposort()
        assert not any(n.op == sigmoid for n in topo)
        f([[-50, -10, -4, -1, 0, 1, 4, 10, 50]])

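        # Excluding 'fusion' and 'inplace' keeps the Clip node introduced by
        # the rewrite from being merged into a fused Composite, so its stack
        # trace can still be checked below.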
        mode2 = mode.excluding('fusion').excluding('inplace')
        f2 = theano.function([x], s, mode=mode2)
        assert check_stack_trace(f2, ops_to_check=theano.tensor.clip)