def __init__(self, data, shape=None):
    """Construct a tensor (Fortran/column-major layout).

    Parameters
    ----------
    data : numpy.ndarray or list
        Element data; a list is converted to a numpy array.
    shape : numpy.ndarray, list or tuple of int, optional
        Desired shape; defaults to ``data.shape``.

    Raises
    ------
    ValueError
        If *shape* is empty or not row-vector-like, or if its total
        size does not match the number of elements in *data*.
    """
    if isinstance(data, list):
        data = numpy.array(data)
    # BUG FIX: 'shape != None' on a numpy array yields an elementwise
    # boolean array whose truth value is ambiguous; use an identity test.
    if shape is not None:
        if len(shape) == 0:
            raise ValueError("Second argument must be a row vector.")
        if shape.__class__ == numpy.ndarray:
            # Only a row-vector-like array is accepted as a shape spec.
            if shape.ndim != 2 and shape[0].size != 1:
                raise ValueError("Second argument must be a row vector.")
        shape = tuple(shape)
    else:
        shape = tuple(data.shape)
    if len(shape) == 0:
        # A 0-dimensional shape only makes sense for empty data.
        if data.size != 0:
            raise ValueError("Empty tensor cannot contain any elements")
    elif tools.prod(shape) != data.size:
        raise ValueError("Size of data does not match specified size of tensor")
    self.shape = shape
    # Column-major (MATLAB-compatible) element ordering.
    self.data = data.reshape(self.shape, order='F')
def ttm(self, mat, dims=None, option=None):
    """Tensor-times-matrix product along one or more modes.

    Parameters
    ----------
    mat : numpy.ndarray or list of numpy.ndarray
        A single 2-D matrix, or one matrix per mode in *dims*.
    dims : int or list of int, optional
        Mode(s) to multiply along; defaults to all modes.
    option : None or 't'
        None multiplies by ``mat``; 't' multiplies by ``mat.T``.

    Returns a new tensor; raises ValueError on bad arguments.
    """
    if dims is None:
        dims = list(range(0, self.ndims()))
    # A list of matrices: apply them one mode at a time, recursively.
    if isinstance(mat, list):
        if len(mat) == 0:
            raise ValueError("the given list of arrays is empty!")
        (dims, vidx) = tools.tt_dimscheck(dims, self.ndims(), len(mat))
        Y = self.ttm(mat[vidx[0]], dims[0], option)
        for i in range(1, len(dims)):
            Y = Y.ttm(mat[vidx[i]], dims[i], option)
        return Y
    if mat.ndim != 2:
        raise ValueError("matrix in 2nd argument must be a matrix!")
    if isinstance(dims, list):
        if len(dims) != 1:
            raise ValueError("Error in number of elements in dims")
        dims = dims[0]
    # BUG FIX: was 'dims > self.ndims()', which let dims == ndims()
    # through and crash later with an IndexError on shp[dims].
    if dims < 0 or dims >= self.ndims():
        raise ValueError("Dimension N must be between 1 and num of dimensions")

    # Compute the product: permute mode `dims` to the front, flatten
    # the remaining modes, multiply, then restore the mode order.
    N = self.ndims()
    shp = self.shape
    order = [dims]
    order.extend(range(0, dims))
    order.extend(range(dims + 1, N))
    newdata = self.permute(order).data
    # Integer division keeps the reshape size an int on Python 3.
    newdata = newdata.reshape(shp[dims], tools.prod(shp) // shp[dims])
    if option is None:
        newdata = numpy.dot(mat, newdata)
        p = mat.shape[0]
    elif option == 't':
        newdata = numpy.dot(mat.transpose(), newdata)
        p = mat.shape[1]
    else:
        raise ValueError("Unknown option")
    newshp = [p]
    newshp.extend(tools.getelts(shp, range(0, dims)))
    newshp.extend(tools.getelts(shp, range(dims + 1, N)))
    Y = tensor(newdata, newshp)
    # Undo the initial permutation so modes come back in original order.
    Y = Y.ipermute(order)
    return Y
def ttm(self, mat, dims=None, option=None):
    """Tensor-times-matrix product along one or more modes.

    Parameters
    ----------
    mat : numpy.ndarray or list of numpy.ndarray
        A single 2-D matrix, or one matrix per mode in *dims*.
    dims : int or list of int, optional
        Mode(s) to multiply along; defaults to all modes.
    option : None or 't'
        None multiplies by ``mat``; 't' multiplies by ``mat.T``.

    Returns a new tensor; raises ValueError on bad arguments.
    """
    if dims is None:
        dims = list(range(0, self.ndims()))
    # A list of matrices: apply them one mode at a time, recursively.
    if isinstance(mat, list):
        if len(mat) == 0:
            raise ValueError("the given list of arrays is empty!")
        # BUG FIX: was 'tools.tt_dimscehck' -- a misspelling that would
        # raise AttributeError at runtime (sibling code spells it
        # tt_dimscheck).
        (dims, vidx) = tools.tt_dimscheck(dims, self.ndims(), len(mat))
        Y = self.ttm(mat[vidx[0]], dims[0], option)
        for i in range(1, len(dims)):
            Y = Y.ttm(mat[vidx[i]], dims[i], option)
        return Y
    if mat.ndim != 2:
        raise ValueError("matrix in 2nd argument must be a matrix!")
    if isinstance(dims, list):
        if len(dims) != 1:
            raise ValueError("Error in number of elements in dims")
        dims = dims[0]
    # BUG FIX: was 'dims > self.ndims()', which let dims == ndims()
    # through and crash later with an IndexError on shp[dims].
    if dims < 0 or dims >= self.ndims():
        raise ValueError("Dimension N must be between 1 and num of dimensions")

    # Compute the product: permute mode `dims` to the front, flatten
    # the remaining modes, multiply, then restore the mode order.
    N = self.ndims()
    shp = self.shape
    order = [dims]
    order.extend(range(0, dims))
    order.extend(range(dims + 1, N))
    newdata = self.permute(order).data
    # Integer division keeps the reshape size an int on Python 3.
    newdata = newdata.reshape(shp[dims], tools.prod(shp) // shp[dims])
    if option is None:
        newdata = numpy.dot(mat, newdata)
        p = mat.shape[0]
    elif option == 't':
        newdata = numpy.dot(mat.transpose(), newdata)
        p = mat.shape[1]
    else:
        raise ValueError("Unknown option")
    newshp = [p]
    newshp.extend(tools.getelts(shp, range(0, dims)))
    newshp.extend(tools.getelts(shp, range(dims + 1, N)))
    Y = tensor(newdata, newshp)
    # Undo the initial permutation so modes come back in original order.
    Y = Y.ipermute(order)
    return Y
def __init__(self, data, shape=None):
    """Construct a tensor (default C-order layout).

    Parameters
    ----------
    data : numpy.ndarray or list
        Element data; a list is converted to a numpy array.
    shape : numpy.ndarray, list or tuple of int, optional
        Desired shape; defaults to ``data.shape``.

    Raises
    ------
    ValueError
        If *shape* is empty or not row-vector-like, or if its total
        size does not match the number of elements in *data*.
    """
    if isinstance(data, list):
        data = numpy.array(data)
    # BUG FIX: 'shape != None' on a numpy array yields an elementwise
    # boolean array whose truth value is ambiguous; use an identity test.
    if shape is not None:
        if len(shape) == 0:
            raise ValueError("Second argument must be a row vector.")
        if shape.__class__ == numpy.ndarray:
            # Only a row-vector-like array is accepted as a shape spec.
            if shape.ndim != 2 and shape[0].size != 1:
                raise ValueError("Second argument must be a row vector.")
        shape = tuple(shape)
    else:
        shape = tuple(data.shape)
    if len(shape) == 0:
        # A 0-dimensional shape only makes sense for empty data.
        if data.size != 0:
            raise ValueError("Empty tensor cannot contain any elements")
    elif tools.prod(shape) != data.size:
        raise ValueError("Size of data does not match specified size of tensor")
    self.shape = shape
    self.data = data.reshape(self.shape)
def gm(values):
    """Computes the geometric mean of a list of numbers.

    >>> gm([2, 8])
    4.0
    """
    assert len(values) >= 1
    # BUG FIX: '1 / len(values)' truncates to 0 under Python 2 integer
    # division, making gm() always return 1.0 for len(values) > 1.
    # 1.0 / len(values) gives the true fractional exponent.
    return round(tools.prod(values) ** (1.0 / len(values)), 4)
def __call__(self, *args, **kwargs):
    """Run the reduction kernel over *args* and return the result.

    Accepts one keyword argument, ``out``: a preallocated GpuArray of
    the expected shape/dtype. Any other keyword raises TypeError.
    Raises ValueError when the non-reduced size exceeds the context's
    maximum grid size.
    """
    _, nd, dims, strs, offsets, contig = check_args(args, collapse=False,
                                                    broadcast=False)

    out = kwargs.pop('out', None)
    if len(kwargs) != 0:
        # BUG FIX: kwargs.keys()[0] fails on Python 3 (dict views are
        # not indexable); next(iter(...)) works on both 2 and 3.
        raise TypeError('Unexpected keyword argument: %s' %
                        next(iter(kwargs)))

    n = prod(dims)
    out_shape = tuple(d for i, d in enumerate(dims) if not self.redux[i])
    gs = prod(out_shape)
    if gs == 0:
        gs = 1
    # BUG FIX: was 'n /= gs', which makes n a float on Python 3; n is
    # passed to the kernel as a size and must stay an int.
    n //= gs
    if gs > self.context.maxgsize:
        raise ValueError("Array too big to be reduced along the "
                         "selected axes")

    if out is None:
        out = gpuarray.empty(out_shape, context=self.context,
                             dtype=self.dtype_out)
    else:
        if out.shape != out_shape or out.dtype != self.dtype_out:
            raise TypeError(
                "Out array is not of expected type "
                "(expected %s %s, got %s %s)" % (out_shape, self.dtype_out,
                                                 out.shape, out.dtype))
    # Don't compile and cache for nothing for big size
    if self.init_local_size < n:
        k, _, _, ls = self._get_basic_kernel(self.init_local_size, nd)
    else:
        k, _, _, ls = self._get_basic_kernel(n, nd)

    # Kernel arguments: size, output, shape, then each input with its
    # offset and strides (GpuArrays only).
    kargs = [n, out]
    kargs.extend(dims)
    for i, arg in enumerate(args):
        kargs.append(arg)
        if isinstance(arg, gpuarray.GpuArray):
            kargs.append(offsets[i])
            kargs.extend(strs[i])
    k(*kargs, ls=ls, gs=gs)
    return out
def gm(values):
    """Return the geometric mean of *values*, rounded to 4 decimals.

    >>> gm([2, 8])
    4.0
    """
    assert len(values) >= 1
    # nth root of the product == product of the nth roots; take each
    # element to the 1/n power first, then multiply.
    exponent = 1.0 / len(values)
    roots = []
    for value in values:
        roots.append(value ** exponent)
    return round(tools.prod(roots), 4)
def __call__(self, *args, **kwargs):
    """Run the reduction kernel over *args* and return the result.

    Accepts one keyword argument, ``out``: a preallocated GpuArray of
    the expected shape/dtype. Any other keyword raises TypeError.
    Raises ValueError when the non-reduced size exceeds the context's
    maximum grid size.
    """
    _, nd, dims, strs, offsets, contig = check_args(args, collapse=False,
                                                    broadcast=False)

    out = kwargs.pop('out', None)
    if len(kwargs) != 0:
        # BUG FIX: kwargs.keys()[0] fails on Python 3 (dict views are
        # not indexable); next(iter(...)) works on both 2 and 3.
        raise TypeError('Unexpected keyword argument: %s' %
                        next(iter(kwargs)))

    n = prod(dims)
    out_shape = tuple(d for i, d in enumerate(dims) if not self.redux[i])
    gs = prod(out_shape)
    if gs == 0:
        gs = 1
    # BUG FIX: was 'n /= gs', which makes n a float on Python 3; n is
    # passed to the kernel as a size and must stay an int.
    n //= gs
    if gs > self.context.maxgsize:
        raise ValueError("Array too big to be reduced along the "
                         "selected axes")

    if out is None:
        out = gpuarray.empty(out_shape, context=self.context,
                             dtype=self.dtype_out)
    else:
        if out.shape != out_shape or out.dtype != self.dtype_out:
            raise TypeError("Out array is not of expected type "
                            "(expected %s %s, got %s %s)" % (
                                out_shape, self.dtype_out,
                                out.shape, out.dtype))
    # Don't compile and cache for nothing for big size
    if self.init_local_size < n:
        k, _, _, ls = self._get_basic_kernel(self.init_local_size, nd)
    else:
        k, _, _, ls = self._get_basic_kernel(n, nd)

    # Kernel arguments: size, output, shape, then each input with its
    # offset and strides (GpuArrays only).
    kargs = [n, out]
    kargs.extend(dims)
    for i, arg in enumerate(args):
        kargs.append(arg)
        if isinstance(arg, gpuarray.GpuArray):
            kargs.append(offsets[i])
            kargs.extend(strs[i])
    k(*kargs, ls=ls, gs=gs)
    return out
def ttv(self, vec, dims=None):
    """Tensor-times-vector product along one or more modes.

    Parameters
    ----------
    vec : numpy.ndarray or list of numpy.ndarray
        A single 1-D vector, or one vector per mode in *dims*.
    dims : int or list of int, optional
        Mode(s) to contract; defaults to all modes.

    Returns a tensor with one fewer dimension per contracted mode;
    raises ValueError on bad arguments.
    """
    if dims is None:
        dims = list(range(0, self.ndims()))
    # A list of vectors: contract one mode at a time, recursively.
    if isinstance(vec, list):
        if len(vec) == 0:
            raise ValueError("the given list of arrays is empty!")
        (dims, vidx) = tools.tt_dimscheck(dims, self.ndims(), len(vec))
        Y = self.ttv(vec[vidx[0]], dims[0])
        for i in range(1, len(dims)):
            # BUG FIX: was vec[videx[i]] -- a NameError typo for vidx.
            Y = Y.ttv(vec[vidx[i]], dims[i])
        return Y
    if vec.ndim != 1:
        raise ValueError("param in 2nd argument must be a vector")
    if isinstance(dims, list):
        if len(dims) != 1:
            raise ValueError("Error in number of elements in dims")
        dims = dims[0]
    # BUG FIX: was 'dims > self.ndims()', which let dims == ndims()
    # through and crash later with an IndexError on shp[dims].
    if dims < 0 or dims >= self.ndims():
        raise ValueError(
            "Dimension N must be between 1 and num of dimensions")

    # Move the contracted mode to the end, flatten the rest, and
    # contract with a single matrix-vector product.
    N = self.ndims()
    shp = self.shape
    order = []
    order.extend(range(0, dims))
    order.extend(range(dims + 1, N))
    order.extend([dims])
    newdata = self.permute(order).data
    # Integer division keeps the reshape size an int on Python 3.
    newdata = newdata.reshape(tools.prod(shp) // shp[dims], shp[dims])
    newdata = numpy.dot(newdata, vec)
    N -= 1
    # New shape: permuted sizes with the contracted (last) mode dropped.
    sz = numpy.array(self.shape)[order]
    newshp = sz[:N]
    Y = tensor(newdata, newshp)
    return Y
def ttm(self, mat, dims=None, option=None):
    """Tensor-times-matrix product along one or more modes.

    Parameters
    ----------
    mat : numpy.ndarray or list of numpy.ndarray
        A single 2-D matrix, or one matrix per mode in *dims*.
    dims : int or list of int, optional
        Mode(s) to multiply along; defaults to all modes.
    option : None or 't'
        None multiplies by ``mat``; 't' multiplies by ``mat.T``.

    Returns a new tensor; raises ValueError on bad arguments.
    """
    if dims is None:
        dims = list(range(0, self.ndims()))
    # A list of matrices: apply them one mode at a time, recursively.
    if isinstance(mat, list):
        if len(mat) == 0:
            raise ValueError("the given list of arrays is empty!")
        # BUG FIX: was 'tools.tt_dimscehck' -- a misspelling that would
        # raise AttributeError at runtime (sibling code spells it
        # tt_dimscheck).
        (dims, vidx) = tools.tt_dimscheck(dims, self.ndims(), len(mat))
        Y = self.ttm(mat[vidx[0]], dims[0], option)
        for i in range(1, len(dims)):
            Y = Y.ttm(mat[vidx[i]], dims[i], option)
        return Y
    if mat.ndim != 2:
        raise ValueError("matrix in 2nd argument must be a matrix!")
    if isinstance(dims, list):
        if len(dims) != 1:
            raise ValueError("Error in number of elements in dims")
        dims = dims[0]
    # BUG FIX: was 'dims > self.ndims()', which let dims == ndims()
    # through and crash later with an IndexError on shp[dims].
    if dims < 0 or dims >= self.ndims():
        raise ValueError(
            "Dimension N must be between 1 and num of dimensions")

    # Compute the product.
    N = self.ndims()
    shp = self.shape
    order = []
    order.extend([dims])
    order.extend(range(0, dims))
    order.extend(range(dims + 1, N))
    # Step 1: permute mode `dims` to the front, then reshape to
    # (shp[dims], product of the remaining mode sizes).
    # Step 2: multiply mat (or mat.T) by that flattened matrix.
    newdata = self.permute(order).data
    # Integer division keeps the reshape size an int on Python 3.
    newdata = newdata.reshape(shp[dims], tools.prod(shp) // shp[dims])
    if option is None:
        newdata = numpy.dot(mat, newdata)
        p = mat.shape[0]
    elif option == 't':
        newdata = numpy.dot(mat.transpose(), newdata)
        p = mat.shape[1]
    else:
        raise ValueError("Unknown option")
    newshp = [p]
    newshp.extend(tools.getelts(shp, range(0, dims)))
    newshp.extend(tools.getelts(shp, range(dims + 1, N)))
    Y = tensor(newdata, newshp)
    # ipermute is crucial here: it applies the inverse of the initial
    # permutation so the modes come back in their original order.
    Y = Y.ipermute(order)
    return Y
def gm_old(values):
    """Return the geometric mean of *values*, rounded to 2 decimals.

    BUG FIX: '1 / len(values)' truncates to 0 under Python 2 integer
    division, making the exponent 0 and the result always 1.0 for more
    than one value; 1.0 / len(values) gives the true fractional power.
    """
    return round(prod(values) ** (1.0 / len(values)), 2)