def test_prod_without_zeros_custom_acc_dtype(self):
    """Check that ProdWithoutZeros() honours a user-supplied acc_dtype."""
    # Several axis variants are cycled through, although the axis should
    # not influence the dtype logic under test.
    axis_pool = [None, 0, 1, [], [0], [1], [0, 1]]
    combo = 0
    for in_dtype in imap(str, theano.scalar.all_types):
        mat = tensor.matrix(dtype=in_dtype)
        for acc in imap(str, theano.scalar.all_types):
            chosen_axis = axis_pool[combo % len(axis_pool)]
            # An acc_dtype is accepted when it does not downcast the input;
            # int/uint inputs may additionally accumulate in float/complex.
            no_downcast = acc == scalar.upcast(in_dtype, acc)
            int_to_continuous = (in_dtype in tensor.discrete_dtypes
                                 and acc in tensor.continuous_dtypes)
            if no_downcast or int_to_continuous:
                out = ProdWithoutZeros(axis=chosen_axis, acc_dtype=acc)(mat)
                assert out.owner.op.acc_dtype == acc
                if acc.startswith("complex") and in_dtype != acc:
                    # Skip compiling mixed-complex graphs (this also skips
                    # the combo increment below, as in the original flow).
                    continue
                fn = theano.function([mat], out)
                sample = numpy.random.rand(2, 3) * 3
                fn(sample.astype(in_dtype))
            else:
                # Downcasting accumulations must be rejected.
                self.assertRaises(
                    TypeError,
                    ProdWithoutZeros(axis=chosen_axis, acc_dtype=acc),
                    mat)
            combo += 1
def simplify_mul(tree):
    """
    Simplify a multiplication tree.

    :param tree: A multiplication tree (as output by `parse_mul_tree`).

    :return: A multiplication tree computing the same output as `tree` but
    without useless multiplications by 1 nor -1 (identified by leaves of the
    form [False, None] or [True, None] respectively). Useless multiplications
    (with less than two inputs) are also removed from the tree.
    """
    neg, inputs = tree
    if isinstance(inputs, list):
        # Recurse through inputs. The builtin `map` is used instead of the
        # py2-compat `imap`: both iterate lazily enough here, and `map`
        # needs no compatibility import.
        s_inputs = []
        for s_i in map(simplify_mul, inputs):
            if s_i[1] is None:
                # Multiplication by +/-1: fold its sign into the parent.
                neg ^= s_i[0]
            else:
                s_inputs.append(s_i)
        if not s_inputs:
            # The multiplication is empty: it reduces to +/-1.
            rval = [neg, None]
        elif len(s_inputs) == 1:
            # Single remaining input: absorb the accumulated sign into it.
            # Note this mutates the leaf list in place, like the recursion
            # expects.
            s_inputs[0][0] ^= neg
            rval = s_inputs[0]
        else:
            rval = [neg, s_inputs]
    else:
        # Leaf node: nothing to simplify.
        rval = tree
    return rval
def test_prod_without_zeros_default_acc_dtype(self):
    """Check the accumulator dtype ProdWithoutZeros() picks by default."""
    # Axis variants are cycled through even though axis should not matter.
    axis_pool = [None, 0, 1, [], [0], [1], [0, 1]]
    expected_acc = dict(
        bool="int64",
        int8="int64",
        int16="int64",
        int32="int64",
        uint8="uint64",
        uint16="uint64",
        uint32="uint64",
        float16="float32",
        float32="float64",
        complex64="complex128",
    )
    for pos, dtype in enumerate(imap(str, theano.scalar.all_types)):
        chosen_axis = axis_pool[pos % len(axis_pool)]
        mat = tensor.matrix(dtype=dtype)
        prod_var = ProdWithoutZeros(axis=chosen_axis)(mat)
        assert prod_var.owner.op.acc_dtype == expected_acc.get(dtype, dtype)
        if "complex" in dtype:
            continue
        fn = theano.function([mat], prod_var)
        sample = (numpy.random.rand(2, 3) * 3).astype(dtype)
        fn(sample)
def test_prod_without_zeros_custom_dtype(self):
    """Check that ProdWithoutZeros() honours a user-supplied output dtype."""
    # Axis variants are cycled through even though axis should not matter.
    axis_pool = [None, 0, 1, [], [0], [1], [0, 1]]
    combo = 0
    for in_dtype in imap(str, theano.scalar.all_types):
        mat = tensor.matrix(dtype=in_dtype)
        for out_dtype in imap(str, theano.scalar.all_types):
            chosen_axis = axis_pool[combo % len(axis_pool)]
            prod_var = ProdWithoutZeros(axis=chosen_axis, dtype=out_dtype)(mat)
            assert prod_var.dtype == out_dtype
            combo += 1
            # Complex graphs are not compiled, only built and checked above.
            if "complex" in out_dtype or "complex" in in_dtype:
                continue
            fn = theano.function([mat], prod_var)
            sample = (numpy.random.rand(2, 3) * 3).astype(in_dtype)
            fn(sample)
def test_mean_custom_dtype(self):
    """Check that mean() honours a user-supplied dtype for the inner sum."""
    # Axis variants are cycled through even though axis should not matter
    # (except that an empty axis list affects the discrete-dtype promotion).
    axis_pool = [None, 0, 1, [], [0], [1], [0, 1]]
    combo = 0
    for in_dtype in imap(str, theano.scalar.all_types):
        mat = tensor.matrix(dtype=in_dtype)
        for sum_dtype in imap(str, theano.scalar.all_types):
            chosen_axis = axis_pool[combo % len(axis_pool)]
            # Building the inner sum may legitimately raise TypeError;
            # such combinations are simply skipped.
            try:
                mean_var = mat.mean(dtype=sum_dtype, axis=chosen_axis)
            except TypeError:
                pass
            else:
                if sum_dtype in tensor.discrete_dtypes and chosen_axis != []:
                    # A mean over a discrete sum is promoted to float64.
                    assert mean_var.dtype == "float64", (
                        mean_var.dtype, sum_dtype)
                else:
                    assert mean_var.dtype == sum_dtype, (
                        mean_var.dtype, sum_dtype)
                mixed_complex = (
                    ("complex" in in_dtype or "complex" in sum_dtype)
                    and in_dtype != sum_dtype)
                if mixed_complex:
                    # Skips compilation (and the combo increment below).
                    continue
                fn = theano.function([mat], mean_var)
                sample = (numpy.random.rand(3, 4) * 10).astype(in_dtype)
                fn(sample)
                # Also exercise the gradient where it is implemented.
                if "complex" in mean_var.dtype:
                    continue
                try:
                    tensor.grad(mean_var.sum(), mat,
                                disconnected_inputs="ignore")
                except NotImplementedError:
                    # TrueDiv does not seem to have a gradient when
                    # the numerator is complex.
                    if mean_var.dtype not in tensor.complex_dtypes:
                        raise
            combo += 1
def test_prod_without_zeros_default_dtype(self):
    """Check the default output dtype of ProdWithoutZeros()."""
    # Axis variants are cycled through even though axis should not matter.
    axis_pool = [None, 0, 1, [], [0], [1], [0, 1]]
    promoted = dict(
        int8="int64",
        int16="int64",
        int32="int64",
        uint8="uint64",
        uint16="uint64",
        uint32="uint64",
    )
    for pos, dtype in enumerate(imap(str, theano.scalar.all_types)):
        chosen_axis = axis_pool[pos % len(axis_pool)]
        prod_var = ProdWithoutZeros(axis=chosen_axis)(
            tensor.matrix(dtype=dtype))
        assert prod_var.dtype == promoted.get(dtype, dtype)
def test_mean_default_dtype(self):
    """Check the dtype mean() picks when none is specified."""
    # Axis variants are cycled through even though axis should not matter.
    axis_pool = [None, 0, 1, [], [0], [1], [0, 1]]
    for pos, dtype in enumerate(imap(str, theano.scalar.all_types)):
        chosen_axis = axis_pool[pos % len(axis_pool)]
        mat = tensor.matrix(dtype=dtype)
        mean_var = mat.mean(axis=chosen_axis)
        if dtype in tensor.discrete_dtypes:
            # Discrete inputs are averaged in float64 by default.
            assert mean_var.dtype == "float64"
        else:
            assert mean_var.dtype == dtype, (mean_var, mean_var.dtype, dtype)
        fn = theano.function([mat], mean_var)
        sample = (numpy.random.rand(3, 4) * 10).astype(dtype)
        fn(sample)
def test_is_1pexp(self):
    """is_1pexp() must recognise 1 + exp(v) in either operand order."""
    saved_flag = config.warn.identify_1pexp_bug
    config.warn.identify_1pexp_bug = False
    try:
        v = tensor.vector('x')
        exp = tensor.exp
        # Positive matches, both operand orders; the returned flag is the
        # negation marker and the second element is the exp argument.
        assert is_1pexp(1 + exp(v)) == (False, v)
        assert is_1pexp(exp(v) + 1) == (False, v)
        for neg_flag, exp_arg in imap(is_1pexp, [1 + exp(-v), exp(-v) + 1]):
            assert not neg_flag
            assert theano.gof.graph.is_same_graph(exp_arg, -v)
        # Expressions that must NOT be recognised.
        for expr in [1 - exp(v),
                     2 + exp(v),
                     exp(v) + 2,
                     exp(v) - 1,
                     -1 + exp(v),
                     1 + 2 * exp(v)]:
            assert is_1pexp(expr) is None
    finally:
        config.warn.identify_1pexp_bug = saved_flag
def test_is_1pexp(self):
    """is_1pexp(..., only_process_constants=False) must recognise 1+exp(v)."""
    saved_flag = config.warn.identify_1pexp_bug
    config.warn.identify_1pexp_bug = False
    try:
        v = tensor.vector('x')
        exp = tensor.exp
        # Positive matches, both operand orders.
        assert is_1pexp(1 + exp(v), False) == (False, v)
        assert is_1pexp(exp(v) + 1, False) == (False, v)
        check = lambda e: is_1pexp(e, only_process_constants=False)
        for neg_flag, exp_arg in imap(check, [1 + exp(-v), exp(-v) + 1]):
            assert not neg_flag
            assert theano.gof.graph.is_same_graph(exp_arg, -v)
        # Expressions that must NOT be recognised.
        for expr in [1 - exp(v),
                     2 + exp(v),
                     exp(v) + 2,
                     exp(v) - 1,
                     -1 + exp(v),
                     1 + 2 * exp(v)]:
            assert is_1pexp(expr, False) is None
    finally:
        config.warn.identify_1pexp_bug = saved_flag
def test_is_1pexp(self):
    """Check is_1pexp() recognition of 1 + exp(v) with constant folding off."""
    saved_flag = config.warn.identify_1pexp_bug
    config.warn.identify_1pexp_bug = False
    try:
        v = tensor.vector('x')
        exp = tensor.exp

        def probe(expression):
            # All calls disable only_process_constants.
            return is_1pexp(expression, only_process_constants=False)

        # Positive matches, both operand orders.
        assert probe(1 + exp(v)) == (False, v)
        assert probe(exp(v) + 1) == (False, v)
        for neg_flag, exp_arg in imap(probe, [1 + exp(-v), exp(-v) + 1]):
            assert not neg_flag
            assert theano.gof.graph.is_same_graph(exp_arg, -v)
        # Expressions that must NOT be recognised.
        assert probe(1 - exp(v)) is None
        assert probe(2 + exp(v)) is None
        assert probe(exp(v) + 2) is None
        assert probe(exp(v) - 1) is None
        assert probe(-1 + exp(v)) is None
        assert probe(1 + 2 * exp(v)) is None
    finally:
        config.warn.identify_1pexp_bug = saved_flag