Exemplo n.º 1
0
def check_force_gemv_init():
    """
    Test issue 1569.

    Namely when evaluating

        beta*aa + alpha*dot(xx, yy)

    where we set aa = beta = zeros of the correct dimensions we do not
    actually set aa = zeros and instead let the BLAS perform beta*aa with
    uninitialized memory for speed. Occasionally the memory contains values
    that are equivalent to NaN in which case the product beta*aa contains
    NaN's for correctly implemented BLAS libraries. In this situation, since
    we are introducing the NaN's, we need to test whether the BLAS performs
    correctly. If it *does*, i.e. it actually performs the multiplication
    beta*aa which will result in NaN's in the result, then we need to
    initialize the memory to zeros.

    Returns
    -------
    bool
        True if gemv_c_code must zero-initialize the output memory before
        calling the BLAS (cached on the function object after first call).
    """
    # The probe is expensive (it compiles a Theano function), so the result
    # is cached on the function object; None means "not probed yet".
    if check_force_gemv_init._force_init_beta is None:
        # Disable test values while building the probe graph: the symbolic
        # inputs below intentionally carry no test data.
        tv = theano.config.compute_test_value
        tvo = theano.config.compute_test_value_opt
        theano.config.compute_test_value = 'off'
        theano.config.compute_test_value_opt = 'off'
        try:
            aa = T.vector('aa')
            yy = T.vector('yy')
            xx = T.matrix('xx')
            # beta=0 so the BLAS is free to skip (or not) the beta*aa term;
            # GPU backends are excluded because we are probing the CPU BLAS.
            f = theano.function(
                [aa, yy, xx],
                gemv_no_inplace(aa, 1., xx, yy, 0.),
                theano.compile.Mode(optimizer='fast_compile').excluding(
                    'gpu', 'gpuarray'),
                profile=False)
        finally:
            # Always restore the user's config, even if compilation failed.
            theano.config.compute_test_value = tv
            theano.config.compute_test_value_opt = tvo

        # Here we introduce NaNs into the data; if they are returned by the
        # BLAS then we want gemv_c_code to initialize the memory to 0 so that
        # we don't inadvertently introduce NaNs to the user's data.
        aa_data = numpy.array(
            float('NaN') * numpy.ones((2,)),
            dtype=theano.config.floatX
        )
        yy_data = numpy.array(
            numpy.ones((2,)) * 2,
            dtype=theano.config.floatX
        )
        xx_data = numpy.array(
            numpy.ones((2, 2)),
            dtype=theano.config.floatX
        )
        zz = f(aa_data, yy_data, xx_data)

        # Any NaN in the output means the BLAS really computed 0*NaN, so the
        # output buffer must be zeroed explicitly from now on.
        check_force_gemv_init._force_init_beta = numpy.isnan(zz).any()

    return check_force_gemv_init._force_init_beta
Exemplo n.º 2
0
def check_force_gemv_init():
    if check_force_gemv_init._force_init_beta is None:
        """
        Test issue 1569.
        Namely when evaulating

            beta*aa + alpha*dot(xx, yy)

        where we set aa = betas = zeros of the correct dimensions we do not
        actually set aa = zeros and instead let the BLAS perform beta*aa with
        uninitialized memory for speed. Occasionally the memory contains values
        that are equivalent to NaN in which case the product beta*aa contains
        NaN's for correctly implemented BLAS libraries. In this situation, since
        we are introducing the NaN's, we need to test whether the BLAS performs
        correctly. If it *does*, i.e. it actually performs the multiplication
        beta*aa which will result in NaN's in the result, then we need intialize
        the memory to zeros.
        """
        # Build the probe function with test values switched off, restoring
        # the previous configuration no matter what happens during compile.
        saved_ctv = theano.config.compute_test_value
        saved_ctv_opt = theano.config.compute_test_value_opt
        theano.config.compute_test_value = 'off'
        theano.config.compute_test_value_opt = 'off'
        try:
            aa = T.vector('aa')
            yy = T.vector('yy')
            xx = T.matrix('xx')
            probe_mode = theano.compile.Mode(
                optimizer='fast_compile').excluding('gpu', 'gpuarray')
            f = theano.function([aa, yy, xx],
                                gemv_no_inplace(aa, 1., xx, yy, 0.),
                                probe_mode,
                                profile=False)
        finally:
            theano.config.compute_test_value = saved_ctv
            theano.config.compute_test_value_opt = saved_ctv_opt

        # Feed NaNs through the beta*aa term: if the BLAS propagates them,
        # gemv_c_code must zero the output buffer up front so user data is
        # never polluted with NaNs that we introduced ourselves.
        float_x = theano.config.floatX
        nan_vec = numpy.full((2,), float('NaN'), dtype=float_x)
        twos_vec = numpy.full((2,), 2, dtype=float_x)
        ones_mat = numpy.ones((2, 2), dtype=float_x)
        result = f(nan_vec, twos_vec, ones_mat)

        check_force_gemv_init._force_init_beta = numpy.isnan(result).any()

    return check_force_gemv_init._force_init_beta
Exemplo n.º 3
0
    def test_bad_shape(self):
        # Test that at run time we raise an exception when the shape
        # is not the one specified
        specify_shape = SpecifyShape()

        def _ss_input_type(fn):
            # Type of the first input of the SpecifyShape node in fn's graph.
            ss_nodes = [n for n in fn.maker.fgraph.toposort()
                        if isinstance(n.op, SpecifyShape)]
            return ss_nodes[0].inputs[0].type

        # Vector case: the right length passes, a wrong length raises.
        x = vector()
        good_val = np.random.rand(2).astype(config.floatX)
        f = theano.function([x], specify_shape(x, [2]), mode=self.mode)
        f(good_val)
        bad_val = np.random.rand(3).astype(config.floatX)
        with pytest.raises(AssertionError):
            f(bad_val)

        assert isinstance(_ss_input_type(f), self.input_type)

        # Matrix case: only the exact (2, 3) shape is accepted.
        x = matrix()
        good_val = np.random.rand(2, 3).astype(config.floatX)
        f = theano.function([x], specify_shape(x, [2, 3]), mode=self.mode)
        assert isinstance(_ss_input_type(f), self.input_type)
        f(good_val)
        for wrong_shape in [(1, 3), (2, 2), (5, 5)]:
            bad_val = np.random.rand(*wrong_shape).astype(config.floatX)
            with pytest.raises(AssertionError):
                f(bad_val)
Exemplo n.º 4
0
 def test_is_1pexp(self):
     """is_1pexp recognizes 1 + exp(v) in either operand order and
     rejects everything else."""
     backup = config.warn.identify_1pexp_bug
     config.warn.identify_1pexp_bug = False
     try:
         x = tensor.vector('x')
         exp = tensor.exp
         # Both operand orders are recognized; the argument is not negated.
         assert is_1pexp(1 + exp(x)) == (False, x)
         assert is_1pexp(exp(x) + 1) == (False, x)
         # With a negated argument, the returned graph must match -x.
         for expr in [1 + exp(-x), exp(-x) + 1]:
             neg, exp_arg = is_1pexp(expr)
             assert not neg
             assert theano.gof.graph.is_same_graph(exp_arg, -x)
         # Anything that is not exactly 1 + exp(...) is rejected.
         for expr in [1 - exp(x), 2 + exp(x), exp(x) + 2, exp(x) - 1,
                      -1 + exp(x), 1 + 2 * exp(x)]:
             assert is_1pexp(expr) is None
     finally:
         config.warn.identify_1pexp_bug = backup
Exemplo n.º 5
0
    def test_bad_number_of_shape(self):
        # Test that the number of dimensions provided is good
        specify_shape = SpecifyShape()

        def _ss_input_type(fn):
            # Type of the first input of the SpecifyShape node in fn's graph.
            ss_nodes = [n for n in fn.maker.fgraph.toposort()
                        if isinstance(n.op, SpecifyShape)]
            return ss_nodes[0].inputs[0].type

        x = vector()
        shape_vec = ivector()
        xval = np.random.rand(2).astype(config.floatX)
        # A wrong dimension count is rejected at graph-construction time.
        with pytest.raises(AssertionError):
            specify_shape(x, [])
        with pytest.raises(AssertionError):
            specify_shape(x, [2, 2])

        f = theano.function([x, shape_vec], specify_shape(x, shape_vec),
                            mode=self.mode)
        assert isinstance(_ss_input_type(f), self.input_type)
        # ... and at run time when the shape arrives as data.
        with pytest.raises(AssertionError):
            f(xval, [])
        with pytest.raises(AssertionError):
            f(xval, [2, 2])

        x = matrix()
        xval = np.random.rand(2, 3).astype(config.floatX)
        for wrong_shape in [(), (1, ), (2, 3, 4)]:
            with pytest.raises(AssertionError):
                specify_shape(x, wrong_shape)
            f = theano.function([x, shape_vec], specify_shape(x, shape_vec),
                                mode=self.mode)
            assert isinstance(_ss_input_type(f), self.input_type)
            with pytest.raises(AssertionError):
                f(xval, wrong_shape)
Exemplo n.º 6
0
 def test_is_1pexp(self):
     """Check which graphs is_1pexp recognizes as 1 + exp(v)."""
     # Temporarily silence the 1pexp warning flag; restored in finally.
     backup = config.warn.identify_1pexp_bug
     config.warn.identify_1pexp_bug = False
     try:
         x = tensor.vector('x')
         exp = tensor.exp
         # Both operand orders of 1 + exp(x) are recognized (not negated).
         assert is_1pexp(1 + exp(x), False) == (False, x)
         assert is_1pexp(exp(x) + 1, False) == (False, x)
         # NOTE(review): the lambda parameter shadows the outer symbolic
         # `x`; the input list is built with the outer `x` before the
         # lambda runs, so behavior is unaffected.
         for neg, exp_arg in imap(
                 lambda x: is_1pexp(x, only_process_constants=False),
             [(1 + exp(-x)), (exp(-x) + 1)]):
             assert not neg and theano.gof.graph.is_same_graph(exp_arg, -x)
         # Anything that is not exactly 1 + exp(...) must be rejected.
         assert is_1pexp(1 - exp(x), False) is None
         assert is_1pexp(2 + exp(x), False) is None
         assert is_1pexp(exp(x) + 2, False) is None
         assert is_1pexp(exp(x) - 1, False) is None
         assert is_1pexp(-1 + exp(x), False) is None
         assert is_1pexp(1 + 2 * exp(x), False) is None
     finally:
         config.warn.identify_1pexp_bug = backup
Exemplo n.º 7
0
 def test_is_1pexp(self):
     """Check that is_1pexp only accepts graphs of the form 1 + exp(v)."""
     backup = config.warn.identify_1pexp_bug
     config.warn.identify_1pexp_bug = False
     try:
         x = tensor.vector('x')
         exp = tensor.exp
         # Recognized regardless of operand order; argument not negated.
         for recognized in (1 + exp(x), exp(x) + 1):
             assert is_1pexp(recognized, False) == (False, x)
         # With a negated argument, the extracted graph must equal -x.
         for expr in (1 + exp(-x), exp(-x) + 1):
             neg, inner = is_1pexp(expr, only_process_constants=False)
             assert not neg
             assert theano.gof.graph.is_same_graph(inner, -x)
         # Every near-miss form is rejected.
         rejected = (1 - exp(x), 2 + exp(x), exp(x) + 2,
                     exp(x) - 1, -1 + exp(x), 1 + 2 * exp(x))
         for expr in rejected:
             assert is_1pexp(expr, False) is None
     finally:
         config.warn.identify_1pexp_bug = backup