def local_mul_s_d(node):
    """ mul_s_d -> mul_s_d_csc or mul_s_d_csr """
    if node.op == sparse.mul_s_d:
        x, y = node.inputs

        x_is_sparse_variable = _is_sparse_variable(x)

        if x_is_sparse_variable:
            svar = x
            dvar = y
        else:
            svar = y
            dvar = x

        if dvar.type.ndim != 2:
            return False
        if svar.type.format == 'csc':
            CSx = sparse.CSC
            mul_s_d_csx = sparse.mul_s_d_csc
        elif svar.type.format == 'csr':
            CSx = sparse.CSR
            mul_s_d_csx = sparse.mul_s_d_csr
        else:
            raise NotImplementedError()

        c_data = mul_s_d_csx(sparse.csm_data(svar),
                             sparse.csm_indices(svar),
                             sparse.csm_indptr(svar),
                             dvar)

        return [CSx(c_data,
                    sparse.csm_indices(svar),
                    sparse.csm_indptr(svar),
                    sparse.csm_shape(svar))]

    return False
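
# A minimal registration sketch, added here for illustration only: it assumes the
# usual Theano hooks (the gof.local_optimizer decorator and register_specialize
# from theano.tensor.opt) and an illustrative 'cxx_only' tag; the exact hooks and
# tags vary between Theano versions. It relies on the `sparse` import and the
# local_mul_s_d function defined above; local_mul_s_d_opt is a hypothetical name.
from theano import gof
from theano.tensor.opt import register_specialize

# Wrap the rewrite so the optimizer only tries it on mul_s_d nodes, then register
# it in the 'specialize' phase.
local_mul_s_d_opt = gof.local_optimizer([sparse.mul_s_d])(local_mul_s_d)
register_specialize(local_mul_s_d_opt, 'cxx_only')
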
def local_structured_add_s_v(node):
    if node.op == sparse.structured_add_s_v:
        x, y = node.inputs

        x_is_sparse_variable = _is_sparse_variable(x)
        # y_is_sparse_variable = _is_sparse_variable(y)

        if x_is_sparse_variable:
            svar = x
            dvar = y
        else:
            svar = y
            dvar = x

        if dvar.type.ndim != 1:
            return False
        elif svar.type.format == 'csr':
            CSx = sparse.CSR
            structured_add_s_v_csx = sparse.structured_add_s_v_csr
        else:
            return False

        s_val, s_ind, s_ptr, s_shape = sparse.csm_properties(svar)

        c_data = structured_add_s_v_csx(s_val, s_ind, s_ptr, dvar)

        return [CSx(c_data, s_ind, s_ptr, s_shape)]

    return False
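
# Illustration added alongside the rewrite, not part of the optimizer: a plain
# scipy/numpy sketch of the structured-add semantics this optimization
# specializes, under the assumption that a vector entry is added only at each
# stored nonzero of the CSR matrix, indexed by that nonzero's column. The matrix
# and vector below are made-up example data.
import numpy as np
import scipy.sparse as sp

m = sp.csr_matrix(np.array([[1., 0.],
                            [0., 3.]]))
v = np.array([10., 20.])

structured = m.copy()
structured.data += v[structured.indices]   # add v[col] only where m stores a nonzero
print(structured.toarray())                # [[11.  0.] [ 0. 23.]]
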
def local_usmm_csx(node):
    """ usmm -> usmm_csc_dense """
    if node.op == usmm:
        alpha, x, y, z = node.inputs

        x_is_sparse_variable = _is_sparse_variable(x)
        y_is_sparse_variable = _is_sparse_variable(y)

        if x_is_sparse_variable and not y_is_sparse_variable:
            if x.type.format == 'csc':
                x_val, x_ind, x_ptr, x_shape = csm_properties(x)
                x_nsparse = x_shape[0]
                dtype_out = scalar.upcast(alpha.type.dtype, x.type.dtype,
                                          y.type.dtype, z.type.dtype)
                if dtype_out not in ('float32', 'float64'):
                    return False

                # Sparse cast is not implemented.
                if y.type.dtype != dtype_out:
                    return False

                return [usmm_csc_dense(alpha, x_val, x_ind, x_ptr,
                                       x_nsparse, y, z)]
    return False
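
# Usage sketch, added for illustration: it builds the alpha * dot(sparse, dense) + z
# pattern that the usmm family of rewrites targets, so local_usmm_csx can replace
# usmm with usmm_csc_dense when the sparse operand is CSC and the upcast dtype is
# float32/float64. The names, shapes, and values below are illustrative
# assumptions; csc_matrix and dot are the standard theano.sparse constructors.
import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse
import theano.tensor as T

alpha = T.scalar('alpha', dtype='float64')
xs = theano.sparse.csc_matrix('xs', dtype='float64')   # sparse CSC operand
yd = T.matrix('yd', dtype='float64')                   # dense operand
zd = T.matrix('zd', dtype='float64')

out = alpha * theano.sparse.dot(xs, yd) + zd
f = theano.function([alpha, xs, yd, zd], out)

x_val = sp.csc_matrix(np.eye(3, dtype='float64'))
print(f(2.0, x_val, np.ones((3, 3)), np.zeros((3, 3))))   # 2 * dot(I, ones) + 0
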