def inner(*args):
    """Call ``f`` with garray arguments converted to CudaNdarrays, and
    translate the result back into garray form before returning it."""
    converted = garray_to_cudandarray_nested(args)
    out = f(*converted)
    if isinstance(out, list):
        return cudandarray_to_garray_nested(out)
    # TODO: check for CudaNdArray instance instead
    if isinstance(out, (float, np.ndarray)):
        return out
    return gput.cudandarray_to_garray(out)
def Run_fmin_cg(self, maxiter):
    """Optimize ``self.classifier.flat_params`` with SciPy's conjugate
    gradient, shuttling the parameter vector through garray form.

    ``maxiter`` is forwarded to ``fmin_cg`` as the iteration cap.
    """
    # Pull the raw GPU buffer out of the Theano shared variable.
    raw = self.classifier.flat_params.get_value(
        borrow=True, return_internal_type=True)
    start = gnumpy_utils.cudandarray_to_garray(raw)
    f_op, params_opt = fmin_cg(
        f=self.ObjFun, x0=start, fprime=self.ObjFunPrime, maxiter=maxiter)
    # Convert the optimized vector back and store it on the classifier.
    self.classifier.flat_params.set_value(
        gnumpy_utils.garray_to_cudandarray(params_opt))
def inner(*args):
    """Wrapper around ``f``: convert garray arguments to CudaNdarrays,
    invoke ``f``, and map the result back to garray form."""
    converted = garray_to_cudandarray_nested(args)
    out = f(*converted)
    if isinstance(out, list):
        return cudandarray_to_garray_nested(out)
    # TODO: check for CudaNdArray instance instead
    if isinstance(out, (float, np.ndarray)):
        return out
    # GWJ: copyif=True permits a copy when the memory is contiguous.
    return gput.cudandarray_to_garray(out, copyif=True)
def inner(*args):
    """Adapt ``f`` to garray callers: arguments go in as CudaNdarrays,
    results come back out as garrays (lists handled element-wise)."""
    out = f(*garray_to_cudandarray_nested(args))
    if isinstance(out, list):
        return cudandarray_to_garray_nested(out)
    # TODO: check for CudaNdArray instance instead
    if isinstance(out, (float, np.ndarray)):
        return out
    return gput.cudandarray_to_garray(out, copyif=True)
def inner(*args):
    """Bridge between garray-world and ``f``: convert the inputs to
    CudaNdarrays, run ``f``, and return the result as garray(s)."""
    res = f(*garray_to_cudandarray_nested(args))
    if isinstance(res, list):
        res = cudandarray_to_garray_nested(res)
    elif not isinstance(res, (float, np.ndarray)):
        # TODO: check for CudaNdArray instance instead
        res = gput.cudandarray_to_garray(res, copyif=True)
    return res
def test(shape=(3, 4, 5)):
    """Make sure that the gnumpy conversion is exact."""
    from numpy import array

    gpu = theano.sandbox.cuda.basic_ops.gpu_from_host
    U = gpu(theano.tensor.ftensor3('U'))
    add_one = theano.function([U], gpu(U + 1))

    A = gnumpy.rand(*shape)
    A_cnd = garray_to_cudandarray(A)
    B_cnd = add_one(A_cnd)
    B = cudandarray_to_garray(B_cnd)
    B2 = array(B_cnd)

    expected = (A + 1).asarray()
    # The round trip must be bit-exact, not merely close.
    assert abs(expected - B.asarray()).max() == 0
    assert abs(expected - B2).max() == 0
def test(shape=(3, 4, 5)):
    """Make sure that the gnumpy conversion is exact from garray to
    CudaNdarray back to garray."""
    gpu = theano.sandbox.cuda.basic_ops.gpu_from_host
    U = gpu(theano.tensor.ftensor3('U'))
    add_one = theano.function([U], gpu(U + 1))

    A = gnumpy.rand(*shape)
    A_cnd = garray_to_cudandarray(A)
    # dtype is always float32 and garrays don't have strides,
    # so the shape is the only thing to compare.
    assert A_cnd.shape == A.shape

    B_cnd = add_one(A_cnd)
    B = cudandarray_to_garray(B_cnd)
    assert A_cnd.shape == A.shape

    expected = (A + 1).asarray()
    assert (expected == B.asarray()).all()
    assert (expected == np.array(B_cnd)).all()
def test2(shape=(3, 4, 5)):
    """Make sure that the gnumpy conversion is exact from CudaNdarray to
    garray back to CudaNdarray."""
    gpu = theano.sandbox.cuda.basic_ops.gpu_from_host
    U = gpu(theano.tensor.ftensor3('U'))
    # Compiled only for its side effect; the function itself is unused.
    theano.function([U], gpu(U + 1))

    A = np.random.rand(*shape).astype('float32')
    A_cnd = theano.sandbox.cuda.CudaNdarray(A)
    A_gar = cudandarray_to_garray(A_cnd)
    # dtype is always float32 and garrays don't have strides.
    assert A_cnd.shape == A_gar.shape

    B = garray_to_cudandarray(A_gar)
    # dtype always float32; the conversion must alias the same GPU buffer.
    assert A_cnd.shape == B.shape
    assert A_cnd._strides == B._strides
    assert A_cnd.gpudata == B.gpudata

    assert (np.asarray(B) == A).all()
def cudandarray_to_garray_nested(lst):
    """Convert every element of a (possibly nested) list from CudaNdarray
    to garray, preserving the original nesting structure."""
    converted = [gput.cudandarray_to_garray(item) for item in flatten(lst)]
    return unflatten(lst, converted)
def ObjFunPrime(self, params):
    """Gradient callback for ``fmin_cg``: accept a garray parameter
    vector, evaluate the compiled gradient function on its CudaNdarray
    form, and return the gradient as a garray."""
    cuda_params = gnumpy_utils.garray_to_cudandarray(params)
    return gnumpy_utils.cudandarray_to_garray(self.grad_Tfunc(cuda_params))