def test_MatrixSlice():
    n = sympy.Symbol('n', integer=True)
    X = sympy.MatrixSymbol('X', n, n)

    Y = X[1:2:3, 4:5:6]
    Yt = theano_code(Y)

    from theano.scalar import Scalar
    from theano import Constant

    s = Scalar('int64')
    assert tuple(Yt.owner.op.idx_list) == (slice(s, s, s), slice(s, s, s))
    assert Yt.owner.inputs[0] == theano_code(X)
    # == doesn't work in theano like it does in SymPy. You have to use
    # equals.
    assert all(
        i.equals(j) for i, j in zip(
            Yt.owner.inputs[1:],
            [Constant(s, 1), Constant(s, 2), Constant(s, 3),
             Constant(s, 4), Constant(s, 5), Constant(s, 6)],
        )
    )

    k = sympy.Symbol('k')
    kt = theano_code(k, dtypes={k: 'int32'})
    start, stop, step = 4, k, 2
    Y = X[start:stop:step]
    Yt = theano_code(Y, dtypes={n: 'int32', k: 'int32'})
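
# The dtypes mapping exercised in the test above can be tried on its own.
# This is a minimal standalone sketch (assuming a SymPy version that still
# ships sympy.printing.theanocode and a working Theano install), not part of
# the test suite: a mapped symbol should come back as a Theano variable with
# the requested dtype, while unmapped symbols default to floatX.
import sympy
from sympy.printing.theanocode import theano_code

x = sympy.Symbol('x')
xt = theano_code(x, dtypes={x: 'int32'})
assert xt.type.dtype == 'int32'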
def test_hash_and_eq_params(self):
    wp1 = ParamsType(a=Generic(),
                     array=TensorType('int64', (False,)),
                     floatting=Scalar('float64'),
                     npy_scalar=TensorType('float64', tuple()))
    wp2 = ParamsType(a=Generic(),
                     array=TensorType('int64', (False,)),
                     floatting=Scalar('float64'),
                     npy_scalar=TensorType('float64', tuple()))
    w1 = Params(wp1, a=1, array=np.asarray([1, 2, 4, 5, 7]),
                floatting=-4.5, npy_scalar=np.asarray(12))
    w2 = Params(wp2, a=1, array=np.asarray([1, 2, 4, 5, 7]),
                floatting=-4.5, npy_scalar=np.asarray(12))
    assert w1 == w2
    assert not (w1 != w2)
    assert hash(w1) == hash(w2)
    # Changing attribute names only (a -> other_name).
    wp2_other = ParamsType(other_name=Generic(),
                           array=TensorType('int64', (False,)),
                           floatting=Scalar('float64'),
                           npy_scalar=TensorType('float64', tuple()))
    w2 = Params(wp2_other, other_name=1, array=np.asarray([1, 2, 4, 5, 7]),
                floatting=-4.5, npy_scalar=np.asarray(12))
    assert w1 != w2
    # Changing attribute values only (now a=2).
    w2 = Params(wp2, a=2, array=np.asarray([1, 2, 4, 5, 7]),
                floatting=-4.5, npy_scalar=np.asarray(12))
    assert w1 != w2
    # Changing NumPy array values (5 -> -5).
    w2 = Params(wp2, a=1, array=np.asarray([1, 2, 4, -5, 7]),
                floatting=-4.5, npy_scalar=np.asarray(12))
    assert w1 != w2
def make_node(self, *inputs):
    inp = list(map(tensor.as_tensor_variable, inputs))
    bc = []
    if self.dimension is None:
        # Reducing over all dimensions: the output is a scalar.
        out = [Scalar(inp[0].type.dtype)('out')]
    else:
        # The reduced dimension (and any already-broadcastable one) becomes
        # broadcastable in the output type; other dimensions keep their flags.
        for i in range(inp[0].ndim):
            if i == self.dimension or inp[0].broadcastable[i]:
                bc.append(True)
            else:
                bc.append(False)
        out = [tensor.TensorType(inp[0].type.dtype, bc)()]
    # if self.keepdim:
    # else:
    return gof.Apply(self, inp, out)
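
# The broadcastable bookkeeping in make_node above can be checked in
# isolation.  This is a standalone sketch of the same rule
# (`output_broadcastable` is a hypothetical helper, not part of the op):
# the reduced axis becomes broadcastable, other axes keep their flags.
def output_broadcastable(input_broadcastable, axis):
    return tuple(bool(i == axis or b)
                 for i, b in enumerate(input_broadcastable))

assert output_broadcastable((False, False, True), axis=1) == (False, True, True)
assert output_broadcastable((False, False), axis=0) == (True, False)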
from __future__ import absolute_import, print_function, division

import theano
import numpy as np

from unittest import TestCase

from theano.gof import Op, COp, Apply
from theano import Generic
from theano.scalar import Scalar
from theano.tensor import TensorType
from theano.gof import ParamsType, Params, EnumList
from theano import tensor
from theano.tests import unittest_tools as utt

tensor_type_0d = TensorType('float64', tuple())
scalar_type = Scalar('float64')
generic_type = Generic()


# A test op to compute `y = a*x^2 + bx + c` for any tensor x, with a, b, c as op params.
class QuadraticOpFunc(Op):
    __props__ = ('a', 'b', 'c')
    params_type = ParamsType(a=tensor_type_0d, b=scalar_type, c=generic_type)

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def make_node(self, x):
        x = tensor.as_tensor_variable(x)
        return Apply(self, [x], [x.type()])
import numpy as np
import pytest

import theano
from tests import unittest_tools as utt
from theano import Generic, tensor
from theano.gof import Apply, COp, EnumList, Op, Params, ParamsType
from theano.scalar import Scalar
from theano.tensor import TensorType

tensor_type_0d = TensorType("float64", tuple())
scalar_type = Scalar("float64")
generic_type = Generic()


# A test op to compute `y = a*x^2 + bx + c` for any tensor x, with a, b, c as op params.
class QuadraticOpFunc(Op):
    __props__ = ("a", "b", "c")
    params_type = ParamsType(a=tensor_type_0d, b=scalar_type, c=generic_type)

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def make_node(self, x):
        x = tensor.as_tensor_variable(x)
        return Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage, coefficients):
        x = inputs[0]
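        # Hedged sketch of how this truncated perform might continue (not the
        # verbatim upstream code): `coefficients` is the Params instance built
        # from params_type above, so a, b, c are plain attributes on it, and
        # the op evaluates the quadratic named in the class comment.
        y = output_storage[0]
        y[0] = coefficients.a * (x ** 2) + coefficients.b * x + coefficients.c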
def c_code(self, node, name, ins, outs, sub):
    # support old pickled graphs
    if len(ins) == 2:
        (pvals, unis) = ins
        n = 1
    else:
        (pvals, unis, n) = ins
    (z,) = outs
    if self.odtype == "auto":
        t = f"PyArray_TYPE({pvals})"
    else:
        t = Scalar(self.odtype).dtype_specs()[1]
        if t.startswith("theano_complex"):
            t = t.replace("theano_complex", "NPY_COMPLEX")
        else:
            t = t.upper()
    fail = sub["fail"]
    return (
        """
    if (PyArray_NDIM(%(pvals)s) != 2)
    {
        PyErr_Format(PyExc_TypeError, "pvals ndim should be 2");
        %(fail)s;
    }
    if (PyArray_NDIM(%(unis)s) != 1)
    {
        PyErr_Format(PyExc_TypeError, "unis ndim should be 1");
        %(fail)s;
    }

    if (PyArray_DIMS(%(unis)s)[0] != (PyArray_DIMS(%(pvals)s)[0] * %(n)s))
    {
        PyErr_Format(PyExc_ValueError, "unis.shape[0] != pvals.shape[0] * n");
        %(fail)s;
    }

    if ((NULL == %(z)s)
        || ((PyArray_DIMS(%(z)s))[0] != (PyArray_DIMS(%(pvals)s))[0])
        || ((PyArray_DIMS(%(z)s))[1] != (PyArray_DIMS(%(pvals)s))[1])
    )
    {
        Py_XDECREF(%(z)s);
        %(z)s = (PyArrayObject*) PyArray_EMPTY(2,
            PyArray_DIMS(%(pvals)s),
            %(t)s,
            0);
        if (!%(z)s)
        {
            PyErr_SetString(PyExc_MemoryError, "failed to alloc z output");
            %(fail)s;
        }
    }

    { // NESTED SCOPE

    const int nb_multi = PyArray_DIMS(%(pvals)s)[0];
    const int nb_outcomes = PyArray_DIMS(%(pvals)s)[1];
    const int n_samples = %(n)s;

    //
    // For each multinomial, loop over each possible outcome
    //
    for (int c = 0; c < n_samples; ++c){
        for (int n = 0; n < nb_multi; ++n)
        {
            int waiting = 1;
            double cummul = 0.;
            const dtype_%(unis)s* unis_n = (dtype_%(unis)s*)PyArray_GETPTR1(%(unis)s, c*nb_multi + n);
            for (int m = 0; m < nb_outcomes; ++m)
            {
                dtype_%(z)s* z_nm = (dtype_%(z)s*)PyArray_GETPTR2(%(z)s, n,m);
                const dtype_%(pvals)s* pvals_nm = (dtype_%(pvals)s*)PyArray_GETPTR2(%(pvals)s, n,m);
                cummul += *pvals_nm;
                if (c == 0)
                {
                    if (waiting && (cummul > *unis_n))
                    {
                        *z_nm = 1.;
                        waiting = 0;
                    }
                    else
                    {
                        // if we re-used old z pointer, we have to clear it out.
                        *z_nm = 0.;
                    }
                }
                else
                {
                    if (cummul > *unis_n)
                    {
                        *z_nm = *z_nm + 1.;
                        break;
                    }
                }
            }
        }
    }
    } // END NESTED SCOPE
    """
        % locals()
    )
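
# To make the C loop above easier to follow, here is a hedged NumPy
# re-statement of the same logic (a reference sketch only, not the op's actual
# perform method).  Each row of pvals is assumed to sum to 1 and each entry of
# unis to lie in [0, 1); z[i, m] counts how many of the n draws for row i
# landed on outcome m (the first outcome whose cumulative probability exceeds
# the corresponding uniform sample).
import numpy as np

def multinomial_from_uniform_ref(pvals, unis, n):
    nb_multi, nb_outcomes = pvals.shape
    assert unis.shape[0] == nb_multi * n
    z = np.zeros((nb_multi, nb_outcomes), dtype=pvals.dtype)
    for c in range(n):
        for i in range(nb_multi):
            u = unis[c * nb_multi + i]
            cummul = 0.0
            for m in range(nb_outcomes):
                cummul += pvals[i, m]
                if cummul > u:
                    z[i, m] += 1
                    break
    return z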
def c_code(self, node, name, ins, outs, sub):
    (pvals, unis, n) = ins
    (z,) = outs
    replace = int(self.replace)
    if self.odtype == "auto":
        t = "NPY_INT64"
    else:
        t = Scalar(self.odtype).dtype_specs()[1]
        if t.startswith("theano_complex"):
            t = t.replace("theano_complex", "NPY_COMPLEX")
        else:
            t = t.upper()
    fail = sub["fail"]
    return (
        """
    // create a copy of pvals matrix
    PyArrayObject* pvals_copy = NULL;

    if (PyArray_NDIM(%(pvals)s) != 2)
    {
        PyErr_Format(PyExc_TypeError, "pvals ndim should be 2");
        %(fail)s;
    }
    if (PyArray_NDIM(%(unis)s) != 1)
    {
        PyErr_Format(PyExc_TypeError, "unis ndim should be 1");
        %(fail)s;
    }

    if ( %(n)s > (PyArray_DIMS(%(pvals)s)[1]) )
    {
        PyErr_Format(PyExc_ValueError, "Cannot sample without replacement n samples bigger than the size of the distribution.");
        %(fail)s;
    }

    if (PyArray_DIMS(%(unis)s)[0] != (PyArray_DIMS(%(pvals)s)[0] * %(n)s))
    {
        PyErr_Format(PyExc_ValueError, "unis.shape[0] != pvals.shape[0] * n");
        %(fail)s;
    }

    pvals_copy = (PyArrayObject*) PyArray_EMPTY(2,
        PyArray_DIMS(%(pvals)s),
        PyArray_TYPE(%(pvals)s),
        0);
    if (!pvals_copy)
    {
        PyErr_SetString(PyExc_MemoryError, "failed to alloc pvals_copy");
        %(fail)s;
    }
    PyArray_CopyInto(pvals_copy, %(pvals)s);

    if ((NULL == %(z)s)
        || ((PyArray_DIMS(%(z)s))[0] != (PyArray_DIMS(%(pvals)s))[0])
        || ((PyArray_DIMS(%(z)s))[1] != %(n)s)
    )
    {
        Py_XDECREF(%(z)s);
        npy_intp dims[2];
        dims[0] = PyArray_DIMS(%(pvals)s)[0];
        dims[1] = %(n)s;
        %(z)s = (PyArrayObject*) PyArray_EMPTY(2,
            dims,
            %(t)s,
            -1);
        if (!%(z)s)
        {
            PyErr_SetString(PyExc_MemoryError, "failed to alloc z output");
            %(fail)s;
        }
    }

    { // NESTED SCOPE

    const int nb_multi = PyArray_DIMS(%(pvals)s)[0];
    const int nb_outcomes = PyArray_DIMS(%(pvals)s)[1];
    const int n_samples = %(n)s;

    //
    // For each multinomial, loop over each possible outcome,
    // and set selected pval to 0 after being selected
    //
    for (int c = 0; c < n_samples; ++c){
        for (int n = 0; n < nb_multi; ++n)
        {
            double cummul = 0.;
            const dtype_%(unis)s* unis_n = (dtype_%(unis)s*)PyArray_GETPTR1(%(unis)s, c*nb_multi + n);
            dtype_%(z)s* z_nc = (dtype_%(z)s*)PyArray_GETPTR2(%(z)s, n, c);
            for (int m = 0; m < nb_outcomes; ++m)
            {
                dtype_%(pvals)s* pvals_nm = (dtype_%(pvals)s*)PyArray_GETPTR2(pvals_copy, n, m);
                cummul += *pvals_nm;
                if (cummul > *unis_n)
                {
                    *z_nc = m;
                    // No need to renormalize after the last samples.
                    if (c == (n_samples - 1))
                        break;
                    if (! %(replace)s )
                    {
                        // renormalize the nth row of pvals, reuse (cummul-*pvals_nm) to initialize the sum
                        dtype_%(pvals)s sum = cummul - *pvals_nm;
                        dtype_%(pvals)s* pvals_n = (dtype_%(pvals)s*)PyArray_GETPTR2(pvals_copy, n, m);
                        *pvals_nm = 0.;
                        for (int k = m; k < nb_outcomes; ++k)
                        {
                            sum = sum + *pvals_n;
                            pvals_n++;
                        }
                        pvals_n = (dtype_%(pvals)s*)PyArray_GETPTR2(pvals_copy, n, 0);
                        for (int k = 0; k < nb_outcomes; ++k)
                        {
                            *pvals_n = *pvals_n / sum;
                            pvals_n++;
                        }
                    }
                    break;
                }
            }
        }
    }

    // delete pvals_copy
    {
        Py_XDECREF(pvals_copy);
    }
    } // END NESTED SCOPE
    """
        % locals()
    )
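
# Hedged NumPy re-statement of the C loop above (a reference sketch only, not
# the op's actual perform method).  z[i, c] is the index of the c-th outcome
# drawn for row i; when sampling without replacement, the chosen outcome's
# probability is zeroed and the row is renormalized before the next draw,
# mirroring the renormalization block in the C code.
import numpy as np

def choice_from_uniform_ref(pvals, unis, n, replace=False):
    pvals = np.array(pvals, dtype="float64", copy=True)
    nb_multi, nb_outcomes = pvals.shape
    assert unis.shape[0] == nb_multi * n
    z = np.zeros((nb_multi, n), dtype="int64")
    for c in range(n):
        for i in range(nb_multi):
            u = unis[c * nb_multi + i]
            cummul = np.cumsum(pvals[i])
            # First outcome whose cumulative probability exceeds u.
            m = int(np.searchsorted(cummul, u, side="right"))
            m = min(m, nb_outcomes - 1)  # guard against u >= cummul[-1]
            z[i, c] = m
            if not replace and c < n - 1:
                pvals[i, m] = 0.0
                pvals[i] /= pvals[i].sum()
    return z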