def test_verify_grad_with_zeros(self):
    """Check the gradient of Prod(axis=1) on an input containing zeros.

    Zeros are the important special case for the product gradient:
    rows with exactly one zero and rows with more than one zero take
    different code paths, so the test matrix has rows with 0, 1 and
    2 zeros respectively.
    """
    x_val = numpy.asarray([[1., 2., 3.], [0., 5., 6.], [0., 0., 9.]],
                          dtype='float32')
    x = theano.tensor.dmatrix()
    p = Prod(axis=1)(x)

    # sanity check: the forward row-products themselves are correct
    # (removed leftover debug graphs that only fed commented-out prints)
    fn3 = theano.function([x], [p], mode=self.mode)
    assert numpy.allclose(fn3(x_val), [6., 0., 0.])

    # now the gradient, checked numerically with verify_grad
    unittest_tools.verify_grad(Prod(axis=1), [x_val], mode=self.mode)
def test_other_grad_tests(self):
    """Check Prod gradients along axis 1 and axis 0 against hand values.

    Inputs include rows/columns with zero, one and two zeros, since the
    product gradient special-cases zeros in the reduced axis.
    """
    x = theano.tensor.dmatrix()
    x_val1 = numpy.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]],
                         dtype='float32')
    x_val2 = numpy.array([[1, 2, 0], [0, 5, 6], [7, 8, 9], [9, 10, 0]],
                         dtype='float32')
    # fixed: was a redundant double assignment `rng = rng = ...`
    rng = numpy.random.RandomState(43)

    # gradient along axis 1, compared with hand-computed values
    p = Prod(axis=1)
    grad_p = theano.tensor.grad(p(x).sum(), x)
    grad_fn = theano.function([x], grad_p, mode=self.mode)
    assert numpy.allclose(grad_fn(x_val1),
                          [[6., 3., 2.], [30., 0., 0.], [0., 0., 0.]])
    assert numpy.allclose(
        grad_fn(x_val2),
        [[0., 0., 2.], [30., 0., 0.], [72., 63., 56.], [0., 0., 90.]])

    # gradient along axis 0
    p_axis0 = Prod(axis=0)
    grad_p_axis0 = theano.tensor.grad(p_axis0(x).sum(), x)
    grad_fn_axis0 = theano.function([x], grad_p_axis0, mode=self.mode)
    assert numpy.allclose(
        grad_fn_axis0(x_val2),
        [[0., 400., 0.], [63., 160., 0.], [0., 100., 0.], [0., 80., 0.]])

    # numerical check of the axis-1 gradient
    tensor.verify_grad(p, [x_val1], rng=rng, mode=self.mode)
# NOTE(review): this method duplicates the earlier
# `test_verify_grad_with_zeros`; if both live in the same class the
# later definition silently shadows the earlier one — confirm whether
# one copy should be removed or renamed.
def test_verify_grad_with_zeros(self):
    """Check the gradient of Prod(axis=1) on an input containing zeros."""
    # including zeros, as the case with zeros is important
    # (and special cases: 1 zero in the row, more than 1 zero in the row)
    x_val = numpy.asarray([[1., 2., 3.], [0., 5., 6.], [0., 0., 9.]],
                          dtype='float32')
    x = theano.tensor.dmatrix()
    # sanity check
    x2 = theano.tensor.dmatrix()
    p = Prod(axis=1)(x)
    p2 = Prod(axis=1)(x2)
    fn = theano.function([x, x2], [p - p2], mode=self.mode)
    #print "hand computed diff for each row"
    x2_val = numpy.asarray([[1., 2., 3.003], [0.003, 5., 6],
                            [0., 0., 9.01]])
    #print fn(x_val, x2_val)
    fn2 = theano.function([x], [theano.tensor.grad(p.sum(), x)],
                          mode=self.mode)
    #print "real grad"
    #print fn2(x_val)
    # forward row-products: [1*2*3, 0*5*6, 0*0*9]
    fn3 = theano.function([x], [p], mode=self.mode)
    assert numpy.allclose(fn3(x_val), [6., 0., 0.])
    # now with verify_grad
    unittest_tools.verify_grad(Prod(axis=1), [x_val], mode=self.mode)
def test_prod_no_zeros_in_input(self):
    """Check Prod with ``no_zeros_in_input=True``: forward value,
    first and second gradients, all on a zero-free input matrix."""
    x = theano.tensor.dmatrix()
    x_val = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                        dtype='float32')

    # forward pass: product along rows
    row_prod = Prod(axis=1, no_zeros_in_input=True)(x)
    row_fn = theano.function([x], row_prod, mode=self.mode)
    assert numpy.allclose(row_fn(x_val), [6, 120, 504])

    # full reduction: first- and second-order gradients against
    # hand-computed values
    total_prod = Prod(no_zeros_in_input=True)(x)
    g = theano.grad(total_prod, x)
    gg = theano.grad(g.sum(), x)

    g_fn = theano.function([x], g, mode=self.mode)
    assert numpy.allclose(
        g_fn(x_val),
        [[362880., 181440., 120960.],
         [90720., 72576., 60480.],
         [51840., 45360., 40320.]])

    gg_fn = theano.function([x], gg, mode=self.mode)
    assert numpy.allclose(
        gg_fn(x_val),
        [[663696., 422568., 301872.],
         [233964., 190800., 161016.],
         [139248., 122652., 109584.]])

    # numerical gradient checks for both reduction shapes
    unittest_tools.verify_grad(Prod(axis=1, no_zeros_in_input=True),
                               [x_val], mode=self.mode)
    unittest_tools.verify_grad(Prod(no_zeros_in_input=True),
                               [x_val], mode=self.mode)

    def second_deriv(x):
        # gradient-of-a-gradient, checked numerically as well
        return theano.grad(Prod(no_zeros_in_input=True)(x), x)

    unittest_tools.verify_grad(second_deriv, [x_val], mode=self.mode)
def test_verify_grad(self):
    """Numerically verify the gradient of Prod(axis=1), plain and
    composed with a squaring op."""
    # including zeros, as the case with zeros is important
    # (and special cases: 1 zero in the row, more than 1 zero in the row)
    x_val = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                          dtype='float32')

    # direct check of the op's gradient
    unittest_tools.verify_grad(Prod(axis=1), [x_val], mode=self.mode)

    # second time, with some added complexity
    # (verify_grad takes the sum of the matrices anyway)
    def fn(x2):
        row_prods = Prod(axis=1)(x2)
        return theano.tensor.sqr(row_prods)

    unittest_tools.verify_grad(fn, [x_val], mode=self.mode)
def test_pickle_bug(self):
    """Regression test for bug fixed in 24d4fd291054: a Prod op must
    survive a pickle round-trip and remain picklable afterwards."""
    op = Prod()
    payload = cPickle.dumps(op, protocol=-1)
    reloaded = cPickle.loads(payload)
    # the round-tripped op must itself pickle cleanly again
    cPickle.dumps(reloaded)
def fn(x2):
    """Square of the row-wise product of *x2* (for verify_grad)."""
    row_prods = Prod(axis=1)(x2)
    return theano.tensor.sqr(row_prods)
def second_deriv(x):
    """Gradient of the zero-free full product of *x* — differentiating
    this again yields the second derivative (for verify_grad)."""
    total_prod = Prod(no_zeros_in_input=True)(x)
    return theano.grad(total_prod, x)