def local_structured_dot(node):
    if node.op == sparse._structured_dot:
        a, b = node.inputs
        if a.type.format == 'csc':
            a_val, a_ind, a_ptr, a_shape = csm_properties(a)
            a_nsparse = a_shape[0]
            return [sd_csc(a_val, a_ind, a_ptr, a_nsparse, b)]
        if a.type.format == 'csr':
            a_val, a_ind, a_ptr, a_shape = csm_properties(a)
            return [sd_csr(a_val, a_ind, a_ptr, b)]
    return False
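# What the rewrite above buys at the user level: a structured_dot on a csc
# or csr matrix is re-expressed on the matrix's raw (data, indices, indptr)
# pieces during compilation. A minimal sketch, assuming theano,
# theano.sparse as sparse and theano.tensor as tensor are imported:
x = sparse.csc_matrix('x', dtype='float64')
d = tensor.matrix('d')
y = sparse.structured_dot(x, d)   # the op this optimizer matches
f = theano.function([x, d], y)    # the specialize pass may apply the rewrite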
def test_local_csm_properties_csm():
    data = tensor.vector()
    indices, indptr, shape = (tensor.ivector(), tensor.ivector(),
                              tensor.ivector())
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_csm_properties_csm")
    for CS, cast in [(CSC, sp.csc_matrix), (CSR, sp.csr_matrix)]:
        f = theano.function([data, indices, indptr, shape],
                            csm_properties(CS(data, indices, indptr, shape)),
                            mode=mode)
        # theano.printing.debugprint(f)
        assert not any(isinstance(node.op, (CSM, CSMProperties))
                       for node in f.maker.env.toposort())
        v = cast(random_lil((10, 40), config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape)
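# The rewrite this test exercises: taking a matrix apart with csm_properties
# right after assembling it with CSM is a no-op, so the optimizer replaces
# the pair with the original inputs and neither op survives the toposort.
# A minimal sketch of the identity, assuming the same imports as the test:
data = tensor.vector()
indices, indptr, shape = (tensor.ivector(), tensor.ivector(),
                          tensor.ivector())
outs = csm_properties(CSC(data, indices, indptr, shape))
# after 'local_csm_properties_csm': outs is (data, indices, indptr, shape)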
def local_usmm_csx(node):
    """usmm -> usmm_csc_dense"""
    if node.op == usmm:
        alpha, x, y, z = node.inputs

        x_is_sparse_variable = _is_sparse_variable(x)
        y_is_sparse_variable = _is_sparse_variable(y)

        if x_is_sparse_variable and not y_is_sparse_variable:
            if x.type.format == 'csc':
                x_val, x_ind, x_ptr, x_shape = csm_properties(x)
                x_nsparse = x_shape[0]
                dtype_out = scalar.upcast(alpha.type.dtype, x.type.dtype,
                                          y.type.dtype, z.type.dtype)
                if dtype_out not in ('float32', 'float64'):
                    return False
                # Sparse cast is not implemented.
                if y.type.dtype != dtype_out:
                    return False

                return [usmm_csc_dense(alpha, x_val, x_ind, x_ptr,
                                       x_nsparse, y, z)]
    return False
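# usmm fuses a scaled sparse-dense dot with an addition into a single op;
# the rewrite above then specializes it further for a csc-format x. A rough
# user-level sketch of the pattern Theano's local_usmm turns into usmm
# (the exact sub/mul/dot shape and the sign convention are assumptions):
alpha = tensor.scalar('alpha', dtype='float64')
x = sparse.csc_matrix('x', dtype='float64')
y = tensor.matrix('y')
z = tensor.matrix('z')
out = z - alpha * sparse.dot(x, y)   # candidate for the usmm rewrite
f = theano.function([alpha, x, y, z], out)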
layer0.addConnections([c1L0])
# layer1 = HebbianAdaptiveLayer(layer0.output, filter_shape, sigma,
#                               s1, s2, s2, final_shape, Wi=Wi, Wr=Wr)
layers = [layer0]  # , layer1]
out = [input_layer.output]  # , layer1.output]
inp = T.matrix()
# propagate = theano.function([out], y)
# updates = [(param_i + LR * layer0.state * (x - y)) for param_i in zip(params)]
# update = [(param_i, param_i + LR)]
index = T.lscalar()

csc_mat = sparse.csc_matrix('cscMat', dtype='float32')
qq, ww, ee, rr = sparse.csm_properties(csc_mat)
csc_trans = sparse.CSR(qq, ww, ee, rr)
# trans = theano.function([csc_mat], csc_trans)

Wis = []
Wrs = []
states = []
outs = []

a = sp.csc_matrix(np.asarray([[0, 1, 1], [0, 0, 0], [1, 0, 0]],
                             dtype='float32'))
print sparse.transpose(a).toarray()

old_W = sparse.csc_matrix('old_W', dtype='float32')  # Old weight matrix
pop_i = sparse.csc_matrix('pop_i', dtype='float32')  # Input layer
pop_j = sparse.csc_matrix('pop_j', dtype='float32')  # Output layer
alpha = T.scalar('alpha', dtype='float32')
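# A quick check of the csc -> csr reconstruction above; `trans` mirrors the
# commented-out function. Reinterpreting csc properties as csr yields the
# transpose of the original matrix (a minimal sketch using `a` from above):
trans = theano.function([csc_mat], csc_trans)
print trans(a).toarray()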
def _create_move_function(self):
    """Creates a Theano function that moves a word to another class.

    tensor.inc_subtensor actually works like numpy.add.at, so we can use
    it to add the count as many times as the word occurs in a class.
    """

    updates = []

    word_id = tensor.scalar('word_id', dtype=self._count_type)
    word_id.tag.test_value = 0
    new_class_id = tensor.scalar('new_class_id', dtype=self._count_type)
    new_class_id.tag.test_value = 0
    old_class_id = self._word_to_class[word_id]

    # word
    word_count = self._word_counts[word_id]
    c_counts = self._class_counts
    c_counts = tensor.inc_subtensor(c_counts[old_class_id], -word_count)
    c_counts = tensor.inc_subtensor(c_counts[new_class_id], word_count)
    updates.append((self._class_counts, c_counts))

    # word, word X
    data, indices, indptr, _ = sparse.csm_properties(self._ww_counts_csr)
    right_word_ids = indices[indptr[word_id]:indptr[word_id + 1]]
    counts = data[indptr[word_id]:indptr[word_id + 1]]
    selector = tensor.neq(right_word_ids, word_id).nonzero()
    right_word_ids = right_word_ids[selector]
    counts = counts[selector]
    cw_counts = self._cw_counts
    cw_counts = tensor.inc_subtensor(
        cw_counts[old_class_id, right_word_ids], -counts)
    cw_counts = tensor.inc_subtensor(
        cw_counts[new_class_id, right_word_ids], counts)
    right_class_ids = self._word_to_class[right_word_ids]
    cc_counts = self._cc_counts
    cc_counts = tensor.inc_subtensor(
        cc_counts[old_class_id, right_class_ids], -counts)
    cc_counts = tensor.inc_subtensor(
        cc_counts[new_class_id, right_class_ids], counts)

    # word X, word
    data, indices, indptr, _ = sparse.csm_properties(self._ww_counts)
    left_word_ids = indices[indptr[word_id]:indptr[word_id + 1]]
    counts = data[indptr[word_id]:indptr[word_id + 1]]
    selector = tensor.neq(left_word_ids, word_id).nonzero()
    left_word_ids = left_word_ids[selector]
    counts = counts[selector]
    wc_counts = self._wc_counts
    wc_counts = tensor.inc_subtensor(
        wc_counts[left_word_ids, old_class_id], -counts)
    wc_counts = tensor.inc_subtensor(
        wc_counts[left_word_ids, new_class_id], counts)
    left_class_ids = self._word_to_class[left_word_ids]
    cc_counts = tensor.inc_subtensor(
        cc_counts[left_class_ids, old_class_id], -counts)
    cc_counts = tensor.inc_subtensor(
        cc_counts[left_class_ids, new_class_id], counts)

    # word, word
    count = self._ww_counts[word_id, word_id]
    cc_counts = tensor.inc_subtensor(
        cc_counts[old_class_id, old_class_id], -count)
    cc_counts = tensor.inc_subtensor(
        cc_counts[new_class_id, new_class_id], count)
    cw_counts = tensor.inc_subtensor(cw_counts[old_class_id, word_id], -count)
    cw_counts = tensor.inc_subtensor(cw_counts[new_class_id, word_id], count)
    wc_counts = tensor.inc_subtensor(wc_counts[word_id, old_class_id], -count)
    wc_counts = tensor.inc_subtensor(wc_counts[word_id, new_class_id], count)

    updates.append((self._cc_counts, cc_counts))
    updates.append((self._cw_counts, cw_counts))
    updates.append((self._wc_counts, wc_counts))

    w_to_c = self._word_to_class
    w_to_c = tensor.set_subtensor(w_to_c[word_id], new_class_id)
    updates.append((self._word_to_class, w_to_c))

    self._move = theano.function(
        [word_id, new_class_id], [], updates=updates, name='move')
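# The docstring's claim in miniature: with repeated indices, inc_subtensor
# accumulates like numpy.add.at, which is why a count can be added once per
# occurrence above. A minimal sketch, assuming numpy as np and
# theano.tensor as tensor:
idx = np.asarray([0, 0, 2], dtype='int64')
x = tensor.zeros((3,), dtype='float64')
y = tensor.inc_subtensor(x[idx], np.ones(3))
print y.eval()                      # [ 2.  0.  1.] -- duplicates accumulate
ref = np.zeros(3)
np.add.at(ref, idx, np.ones(3))
print ref                           # same result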
x = sparse.csc_matrix(name='x', dtype='float32')
y = sparse.dense_from_sparse(x)
z = sparse.csc_from_dense(y)

# 2.2 Properties and Construction
# Although sparse variables do not allow direct access to their properties,
# this can be accomplished using the csm_properties function. It returns a
# tuple of one-dimensional tensor variables that represent the internal
# characteristics of the sparse matrix.

# To reconstruct a sparse matrix from such properties, the functions CSC
# and CSR can be used. They create the sparse matrix in the desired format.
# As an example, the following code reconstructs a csc matrix as a csr one.

x = sparse.csc_matrix(name='x', dtype='int64')
data, indices, indptr, shape = sparse.csm_properties(x)
y = sparse.CSR(data, indices, indptr, shape)
f = theano.function([x], y)
a = sp.csc_matrix(np.asarray([[0, 1, 1], [0, 0, 0], [1, 0, 0]]))
print a.toarray()
print f(a).toarray()

# The last example shows that one format can be obtained by transposing the
# other. Indeed, when calling the transpose function, the sparse
# characteristics of the resulting matrix cannot be the same as those of
# the input.

# 2.3 Structured Operation
# Several ops are built to exploit the very particular structure of sparse
# matrices. These ops are said to be structured and simply do not perform
# any computation on the zero elements of the sparse matrix. They can be
# thought of as being applied only to the data attribute of the matrix.
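# A minimal sketch of the structured behaviour just described, using
# structured_add (the next snippet shows the same op): only the stored
# nonzero elements are shifted, the zero entries stay zero.
x = sparse.csc_matrix(name='x', dtype='float64')
f = theano.function([x], sparse.structured_add(x, 2))
a = sp.csc_matrix(np.asarray([[0, 3.], [1., 0]]))
print f(a).toarray()   # zeros untouched; nonzeros become 5. and 3.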
# pylint: disable=bad-whitespace, invalid-name, no-member, bad-continuation
# pylint: disable=assignment-from-no-return

# If shape[0] > shape[1], use csr; otherwise, use csc.
# Not all ops are available for both formats yet, though,
# so use the one that has what you need.

# to and fro
x = sparse.csc_matrix(name='x', dtype='float32')
y = sparse.dense_from_sparse(x)
z = sparse.csc_from_dense(y)

# reconstruct a csc matrix as a csr one
x = sparse.csc_matrix(name='x', dtype='int64')
data, indices, indptr, shape = sparse.csm_properties(x)
y = sparse.CSR(data, indices, indptr, shape)
f = theano.function([x], y)
a = sp.csc_matrix(np.asarray([[0, 1, 1], [0, 0, 0], [1, 0, 0]]))
print a.toarray()
print f(a).toarray()

# "structured" operations act only on (originally) nonzero elements
x = sparse.csc_matrix(name='x', dtype='float32')
y = sparse.structured_add(x, 2)
f = theano.function([x], y)
a = sp.csc_matrix(
    np.asarray([[0, 0, -1], [0, -2, 1], [3, 0, 0]], dtype='float32'))
print a.toarray()
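# The snippet presumably continues by evaluating the compiled function;
# structured_add shifts only the stored nonzeros, so the zeros survive:
print f(a).toarray()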
tsize = T.matrix()
tV = T.matrix()
tI = T.ivector()
bla = sparse.construct_sparse_from_list(tsize, tV, tI)

v = ones(2).astype(float32)
i = arange(2)
j = arange(2)
m = sp.csc_matrix((v, (i, j)), shape=(4, 2))

tdata = T.vector()
tindices = T.ivector()
tindptr = T.ivector()
tshape = T.ivector()

x = sparse.csc_matrix()
a, b, c, d = sparse.csm_properties(x)
print a.eval({x: m})
print b.eval({x: m})
print c.eval({x: m})
print d.eval({x: m})

tm = sparse.CSC(tdata, tindices, tindptr, tshape)
shape = array([5, 3]).astype(int32)
indices = array([0, 2, 4]).astype(int32)
indptr = arange(4).astype(int32)
data = ones(3).astype(float32)
m2 = tm.eval({tdata: data, tindices: indices, tindptr: indptr,
              tshape: shape})

ty = T.ivector()
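# A cross-check of the symbolic CSC construction above against the direct
# scipy equivalent (same data/indices/indptr/shape); both should print the
# same 5x3 array:
m2_ref = sp.csc_matrix((data, indices, indptr), shape=(5, 3))
print m2.toarray()
print m2_ref.toarray()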