def backward_pass(x, y, output, hidden_output, W_output):
    output_error = -(y - output)  # Calculate error
    output_over_net = output * (1 - output)  # Derivative of sigmoid function
    # Calculate the sigmoid function's effect on error
    sigmoid_on_error = cp.multiply(output_error, output_over_net)
    W_output = cp.transpose(W_output)
    # Calculate the effect of output weights on hidden weights' error
    hidden_error = cp.dot(sigmoid_on_error, W_output)
    hidden_over_net = hidden_output * (1 - hidden_output)  # Derivative of sigmoid function
    # Calculate the sigmoid function's effect on error
    sigmoid_on_hidden_error = cp.multiply(hidden_error, hidden_over_net)

    # Correctly arrange matrices for calculations
    x = cp.atleast_2d(x)
    hidden_output = cp.atleast_2d(hidden_output)
    x_transpose = cp.transpose(x)
    hidden_output_transpose = cp.transpose(hidden_output)
    sigmoid_on_hidden_error = sigmoid_on_hidden_error.reshape(
        1, sigmoid_on_hidden_error.size)
    sigmoid_on_error = sigmoid_on_error.reshape(1, sigmoid_on_error.size)

    # Calculate weight changes
    W_hidden_c = cp.dot(x_transpose, sigmoid_on_hidden_error)
    W_output_c = cp.dot(hidden_output_transpose, sigmoid_on_error)

    # Calculate bias changes
    B_hidden_c = sigmoid_on_hidden_error
    B_output_c = sigmoid_on_error

    return W_output_c, W_hidden_c, B_hidden_c, B_output_c
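# Hypothetical usage sketch for backward_pass (not from the original source):
# a 3-input, 4-hidden, 2-output network with sigmoid activations. The forward
# pass producing `hidden_output` and `output` is assumed to exist elsewhere.
import cupy as cp

x = cp.random.rand(3)                # input vector
y = cp.asarray([0.0, 1.0])           # target
W_output = cp.random.rand(4, 2)      # hidden -> output weights
hidden_output = cp.random.rand(4)    # sigmoid activations of the hidden layer
output = cp.random.rand(2)           # sigmoid activations of the output layer

W_output_c, W_hidden_c, B_hidden_c, B_output_c = backward_pass(
    x, y, output, hidden_output, W_output)
# A gradient step would then be, e.g., W_output -= learning_rate * W_output_c.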
def __truediv__(self, other):
    """Point-wise division by another matrix, vector or scalar"""
    if _util.isscalarlike(other):
        dtype = self.dtype
        if dtype == numpy.float32:
            # Note: This is a work-around to make the output dtype the same
            # as SciPy. It might be SciPy version dependent.
            dtype = numpy.float64
        dtype = cupy.result_type(dtype, other)
        d = cupy.reciprocal(other, dtype=dtype)
        return multiply_by_scalar(self, d)
    elif _util.isdense(other):
        other = cupy.atleast_2d(other)
        check_shape_for_pointwise_op(self.shape, other.shape)
        return self.todense() / other
    elif base.isspmatrix(other):
        # Note: If broadcasting is needed, an exception is raised here for
        # compatibility with SciPy, as SciPy does not support broadcasting
        # in the "sparse / sparse" case.
        check_shape_for_pointwise_op(self.shape, other.shape,
                                     allow_broadcasting=False)
        dtype = numpy.promote_types(self.dtype, other.dtype)
        if dtype.char not in 'FD':
            dtype = numpy.promote_types(numpy.float64, dtype)
        # Note: The following implementation converts two sparse matrices
        # into dense matrices and then performs a point-wise division,
        # which can use lots of memory.
        self_dense = self.todense().astype(dtype, copy=False)
        return self_dense / other.todense()
    raise NotImplementedError
def __init__(self, arg1, shape=None, dtype=None, copy=False):
    if isinstance(arg1, tuple):
        data, offsets = arg1
        if shape is None:
            raise ValueError('expected a shape argument')
    else:
        raise ValueError('unrecognized form for dia_matrix constructor')

    data = cupy.array(data, dtype=dtype, copy=copy)
    data = cupy.atleast_2d(data)
    offsets = cupy.array(offsets, dtype='i', copy=copy)
    offsets = cupy.atleast_1d(offsets)

    if offsets.ndim != 1:
        raise ValueError('offsets array must have rank 1')
    if data.ndim != 2:
        raise ValueError('data array must have rank 2')
    if data.shape[0] != len(offsets):
        raise ValueError(
            'number of diagonals (%d) does not match the number of '
            'offsets (%d)' % (data.shape[0], len(offsets)))

    sorted_offsets = cupy.sort(offsets)
    if (sorted_offsets[:-1] == sorted_offsets[1:]).any():
        raise ValueError('offset array contains duplicate values')

    self.data = data
    self.offsets = offsets
    self._shape = shape
def _maximum_minimum(self, other, cupy_op, op_name, dense_check):
    if _util.isscalarlike(other):
        other = cupy.asarray(other, dtype=self.dtype)
        if dense_check(other):
            dtype = self.dtype
            # Note: This is a work-around to make the output dtype the same
            # as SciPy. It might be SciPy version dependent.
            if dtype == numpy.float32:
                dtype = numpy.float64
            elif dtype == numpy.complex64:
                dtype = numpy.complex128
            dtype = cupy.result_type(dtype, other)
            other = other.astype(dtype, copy=False)
            # Note: The computation steps below are different from SciPy.
            new_array = cupy_op(self.todense(), other)
            return csr_matrix(new_array)
        else:
            self.sum_duplicates()
            new_data = cupy_op(self.data, other)
            return csr_matrix((new_data, self.indices, self.indptr),
                              shape=self.shape, dtype=self.dtype)
    elif _util.isdense(other):
        self.sum_duplicates()
        other = cupy.atleast_2d(other)
        return cupy_op(self.todense(), other)
    elif isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        return binopt_csr(self, other, op_name)
    raise NotImplementedError
def hilbert2(x, N=None):
    """
    Compute the '2-D' analytic signal of `x`

    Parameters
    ----------
    x : array_like
        2-D signal data.
    N : int or tuple of two ints, optional
        Number of Fourier components. Default is ``x.shape``

    Returns
    -------
    xa : ndarray
        Analytic signal of `x` taken along axes (0,1).

    References
    ----------
    .. [1] Wikipedia, "Analytic signal",
        https://en.wikipedia.org/wiki/Analytic_signal
    """
    x = cp.atleast_2d(x)
    if x.ndim > 2:
        raise ValueError("x must be 2-D.")
    if cp.iscomplexobj(x):
        raise ValueError("x must be real.")
    if N is None:
        N = x.shape
    elif isinstance(N, int):
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
    elif len(N) != 2 or cp.any(cp.asarray(N) <= 0):
        raise ValueError(
            "When given as a tuple, N must hold exactly two positive "
            "integers")

    Xf = fftpack.fft2(x, N, axes=(0, 1))
    h1 = cp.zeros(N[0], 'd')
    h2 = cp.zeros(N[1], 'd')
    # Build the 1-D frequency-domain step filters in place (replaces the
    # original eval/exec indirection with a plain loop; behavior unchanged).
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2

    h = h1[:, cp.newaxis] * h2[cp.newaxis, :]
    k = x.ndim
    while k > 2:
        h = h[:, cp.newaxis]
        k -= 1
    x = fftpack.ifft2(Xf * h, axes=(0, 1))
    return x
def sample(self, n_samples=1, random_state=None):
    """
    Generate random samples from the model.
    Currently, this is implemented only for gaussian and tophat kernels,
    and the Euclidean metric.

    Parameters
    ----------
    n_samples : int, default=1
        Number of samples to generate.
    random_state : int, cupy RandomState instance or None, default=None

    Returns
    -------
    X : cupy array of shape (n_samples, n_features)
        List of samples.
    """
    if not hasattr(self, "X_"):
        raise NotFittedError()

    supported_kernels = ["gaussian", "tophat"]
    if (self.kernel not in supported_kernels
            or self.metric != "euclidean"):
        raise NotImplementedError(
            "Only {} kernels, and the euclidean"
            " metric are supported.".format(supported_kernels))

    if isinstance(random_state, cp.random.RandomState):
        rng = random_state
    else:
        rng = cp.random.RandomState(random_state)

    u = rng.uniform(0, 1, size=n_samples)
    if self.sample_weight_ is None:
        i = (u * self.X_.shape[0]).astype(np.int64)
    else:
        cumsum_weight = cp.cumsum(self.sample_weight_)
        sum_weight = cumsum_weight[-1]
        i = cp.searchsorted(cumsum_weight, u * sum_weight)

    if self.kernel == "gaussian":
        return cp.atleast_2d(rng.normal(self.X_[i], self.bandwidth))

    elif self.kernel == "tophat":
        # we first draw points from a d-dimensional normal distribution,
        # then use an incomplete gamma function to map them to a uniform
        # d-dimensional tophat distribution.
        has_scipy(raise_if_unavailable=True)
        dim = self.X_.shape[1]
        X = rng.normal(size=(n_samples, dim))
        s_sq = cp.einsum("ij,ij->i", X, X).get()

        # do this on the CPU because we don't have
        # a gammainc function readily available
        correction = cp.array(
            gammainc(0.5 * dim, 0.5 * s_sq)**(1.0 / dim)
            * self.bandwidth / np.sqrt(s_sq))
        return self.X_[i] + X * correction[:, np.newaxis]
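# Hypothetical usage sketch: this method is assumed to live on a
# KernelDensity-style estimator (the class itself is not shown here) whose
# fit() sets X_, sample_weight_, kernel, metric and bandwidth.
import cupy as cp

kde = KernelDensity(kernel="gaussian", bandwidth=0.5)  # hypothetical class name
kde.fit(cp.random.rand(100, 2))
samples = kde.sample(n_samples=10, random_state=0)     # cupy array, shape (10, 2)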
def block_diag(*arrs):
    """Create a block diagonal matrix from provided arrays.

    Given the inputs ``A``, ``B``, and ``C``, the output will have these
    arrays arranged on the diagonal::

        [A, 0, 0]
        [0, B, 0]
        [0, 0, C]

    Args:
        A, B, C, ... (cupy.ndarray): Input arrays. A 1-D array of length
            ``n`` is treated as a 2-D array with shape ``(1,n)``.

    Returns:
        (cupy.ndarray): Array with ``A``, ``B``, ``C``, ... on the diagonal.
        The output dtype is the common dtype of all input arrays.

    .. seealso:: :func:`scipy.linalg.block_diag`
    """
    if not arrs:
        return cupy.empty((1, 0))

    # Convert to 2D and check
    if len(arrs) == 1:
        arrs = (cupy.atleast_2d(*arrs), )
    else:
        arrs = cupy.atleast_2d(*arrs)
    if any(a.ndim != 2 for a in arrs):
        bad = [k for k in range(len(arrs)) if arrs[k].ndim != 2]
        raise ValueError('arguments in the following positions have dimension '
                         'greater than 2: {}'.format(bad))

    shapes = tuple(a.shape for a in arrs)
    shape = tuple(sum(x) for x in zip(*shapes))

    dtype = cupy.find_common_type([a.dtype for a in arrs], [])
    out = cupy.zeros(shape, dtype=dtype)
    r, c = 0, 0
    for arr in arrs:
        rr, cc = arr.shape
        out[r:r + rr, c:c + cc] = arr
        r += rr
        c += cc
    return out
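# Usage sketch for block_diag: 1-D inputs are promoted to row vectors, and
# the blocks land on the diagonal of a zero-filled output.
import cupy

A = cupy.asarray([[1, 0], [0, 1]])
B = cupy.asarray([[3, 4, 5], [6, 7, 8]])
C = cupy.asarray([7])                 # 1-D, treated as shape (1, 1)
out = block_diag(A, B, C)             # shape (5, 6)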
def _validate_sos(sos):
    """Helper to validate a SOS input"""
    sos = cp.atleast_2d(sos)
    if sos.ndim != 2:
        raise ValueError("sos array must be 2D")
    n_sections, m = sos.shape
    if m != 6:
        raise ValueError("sos array must be shape (n_sections, 6)")
    if not (sos[:, 3] == 1).all():
        raise ValueError("sos[:, 3] should be all ones")
    return sos, n_sections
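# Usage sketch: a single biquad section passes validation once promoted to
# 2-D; the a0 coefficient (column 3) must be normalized to 1.
import cupy as cp

sos = cp.asarray([1.0, 2.0, 1.0, 1.0, -1.9, 0.94])  # one second-order section
sos, n_sections = _validate_sos(sos)                # sos.shape == (1, 6), n_sections == 1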
def __getitem__(self, key):
    # For testing- Scipy >= 1.4.0 is needed to guarantee
    # results match.
    if scipy_available and numpy.lib.NumpyVersion(
            scipy.__version__) < '1.4.0':
        raise NotImplementedError(
            "Sparse __getitem__() requires Scipy >= 1.4.0")

    row, col = self._parse_indices(key)

    # Dispatch to specialized methods.
    if isinstance(row, _int_scalar_types):
        if isinstance(col, _int_scalar_types):
            return self._get_intXint(row, col)
        elif isinstance(col, slice):
            return self._get_intXslice(row, col)
        elif col.ndim == 1:
            return self._get_intXarray(row, col)
        raise IndexError('index results in >2 dimensions')
    elif isinstance(row, slice):
        if isinstance(col, _int_scalar_types):
            return self._get_sliceXint(row, col)
        elif isinstance(col, slice):
            if row == slice(None) and row == col:
                return self.copy()
            return self._get_sliceXslice(row, col)
        elif col.ndim == 1:
            return self._get_sliceXarray(row, col)
        raise IndexError('index results in >2 dimensions')
    elif row.ndim == 1:
        if isinstance(col, _int_scalar_types):
            return self._get_arrayXint(row, col)
        elif isinstance(col, slice):
            return self._get_arrayXslice(row, col)
    else:  # row.ndim == 2
        if isinstance(col, _int_scalar_types):
            return self._get_arrayXint(row, col)
        elif isinstance(col, slice):
            raise IndexError('index results in >2 dimensions')
        elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1):
            # special case for outer indexing
            return self._get_columnXarray(row[:, 0], col.ravel())

    # The only remaining case is inner (fancy) indexing
    row, col = cupy.broadcast_arrays(row, col)
    if row.shape != col.shape:
        raise IndexError('number of row and column indices differ')
    if row.size == 0:
        return self.__class__(cupy.atleast_2d(row).shape, dtype=self.dtype)
    return self._get_arrayXarray(row, col)
def aslinearoperator(A):
    """Return `A` as a LinearOperator.

    Args:
        A (array-like): The input array to be converted to a
            `LinearOperator` object. It may be any of the following types:

            * :class:`cupy.ndarray`
            * sparse matrix (e.g. ``csr_matrix``, ``coo_matrix``, etc.)
            * :class:`cupyx.scipy.sparse.linalg.LinearOperator`
            * object with ``.shape`` and ``.matvec`` attributes

    Returns:
        cupyx.scipy.sparse.linalg.LinearOperator: `LinearOperator` object

    .. seealso:: :func:`scipy.sparse.linalg.aslinearoperator`
    """
    if isinstance(A, LinearOperator):
        return A

    elif isinstance(A, cupy.ndarray):
        if A.ndim > 2:
            raise ValueError('array must have ndim <= 2')
        A = cupy.atleast_2d(A)
        return MatrixLinearOperator(A)

    elif sparse.isspmatrix(A):
        return MatrixLinearOperator(A)

    else:
        if hasattr(A, 'shape') and hasattr(A, 'matvec'):
            rmatvec = None
            rmatmat = None
            dtype = None
            if hasattr(A, 'rmatvec'):
                rmatvec = A.rmatvec
            if hasattr(A, 'rmatmat'):
                rmatmat = A.rmatmat
            if hasattr(A, 'dtype'):
                dtype = A.dtype
            return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
                                  rmatmat=rmatmat, dtype=dtype)
        else:
            raise TypeError('type not understood')
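# Usage sketch: wrapping a dense ndarray and a sparse matrix. Both wrappers
# expose matvec() without changing the underlying storage.
import cupy
from cupyx.scipy import sparse

M = cupy.random.rand(4, 3)
op = aslinearoperator(M)
y = op.matvec(cupy.ones(3))           # equivalent to M @ cupy.ones(3)

S = sparse.csr_matrix(M)
op_sparse = aslinearoperator(S)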
def multiply(self, other):
    """Point-wise multiplication by another matrix, vector or scalar"""
    if cupy.isscalar(other):
        return multiply_by_scalar(self, other)
    elif _util.isdense(other):
        self.sum_duplicates()
        other = cupy.atleast_2d(other)
        return multiply_by_dense(self, other)
    elif isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        return multiply_by_csr(self, other)
    else:
        msg = 'expected scalar, dense matrix/vector or csr matrix'
        raise TypeError(msg)
def hilbert2(x, N=None):
    """
    Compute the '2-D' analytic signal of `x`

    Parameters
    ----------
    x : array_like
        2-D signal data.
    N : int or tuple of two ints, optional
        Number of Fourier components. Default is ``x.shape``

    Returns
    -------
    xa : ndarray
        Analytic signal of `x` taken along axes (0,1).

    References
    ----------
    .. [1] Wikipedia, "Analytic signal",
        https://en.wikipedia.org/wiki/Analytic_signal
    """
    x = cp.atleast_2d(x)
    if x.ndim > 2:
        raise ValueError("x must be 2-D.")
    if cp.iscomplexobj(x):
        raise ValueError("x must be real.")
    if N is None:
        N = x.shape
    elif isinstance(N, int):
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
    elif len(N) != 2 or cp.any(cp.asarray(N) <= 0):
        raise ValueError(
            "When given as a tuple, N must hold exactly two positive "
            "integers")

    Xf = cp.fft.fft2(x, N, axes=(0, 1))
    h1, h2 = _hilbert2_kernel(size=N[1])

    h = h1[:, cp.newaxis] * h2[cp.newaxis, :]
    k = x.ndim
    while k > 2:
        h = h[:, cp.newaxis]
        k -= 1

    x = cp.fft.ifft2(Xf * h, axes=(0, 1))

    return x
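# Usage sketch: the 2-D analytic signal of a small real image. The result is
# complex, with one-sided spectral content along axes (0, 1).
import cupy as cp

img = cp.random.rand(8, 8)
xa = hilbert2(img)                    # complex output, shape (8, 8)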
def vstack(tup):
    """Stacks arrays vertically.

    If an input array has one dimension, then the array is treated as a
    horizontal vector and stacked along the additional axis at the head.
    Otherwise, the array is stacked along the first axis.

    Args:
        tup (sequence of arrays): Arrays to be stacked. Each array is
            converted by :func:`cupy.atleast_2d` before stacking.

    Returns:
        cupy.ndarray: Stacked array.

    .. seealso:: :func:`numpy.vstack`

    """
    # Convert each element separately so a single-element `tup` still yields
    # a list of arrays for concatenate.
    return concatenate([cupy.atleast_2d(m) for m in tup], 0)
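# Usage sketch: 1-D inputs become single rows before concatenation along
# axis 0.
import cupy

a = cupy.arange(3)                    # shape (3,)  -> treated as (1, 3)
b = cupy.arange(6).reshape(2, 3)      # shape (2, 3)
out = vstack((a, b))                  # shape (3, 3)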
def __init__(self, arg1, shape=None, dtype=None, copy=False):
    if _scipy_available and scipy.sparse.issparse(arg1):
        x = arg1.todia()
        data = x.data
        offsets = x.offsets
        shape = x.shape
        dtype = x.dtype
        copy = False
    elif isinstance(arg1, tuple):
        data, offsets = arg1
        if shape is None:
            raise ValueError('expected a shape argument')
    else:
        raise ValueError(
            'unrecognized form for dia_matrix constructor')

    data = cupy.array(data, dtype=dtype, copy=copy)
    data = cupy.atleast_2d(data)
    offsets = cupy.array(offsets, dtype='i', copy=copy)
    offsets = cupy.atleast_1d(offsets)

    if offsets.ndim != 1:
        raise ValueError('offsets array must have rank 1')
    if data.ndim != 2:
        raise ValueError('data array must have rank 2')
    if data.shape[0] != len(offsets):
        raise ValueError(
            'number of diagonals (%d) does not match the number of '
            'offsets (%d)' % (data.shape[0], len(offsets)))

    sorted_offsets = cupy.sort(offsets)
    if (sorted_offsets[:-1] == sorted_offsets[1:]).any():
        raise ValueError('offset array contains duplicate values')

    self.data = data
    self.offsets = offsets
    if not util.isshape(shape):
        raise ValueError('invalid shape (must be a 2-tuple of int)')
    self._shape = int(shape[0]), int(shape[1])
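# Usage sketch for the (data, offsets) constructor form: each row of `data`
# holds one diagonal, and `offsets` gives its position relative to the main
# diagonal (0 = main, negative = below).
import cupy
from cupyx.scipy import sparse

data = cupy.asarray([[1, 2, 3, 4], [1, 2, 3, 4]], dtype='f')
offsets = cupy.asarray([0, -1], dtype='i')
m = sparse.dia_matrix((data, offsets), shape=(4, 4))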
def subdivide_polygon(coords, degree=2, preserve_ends=False):
    """Subdivision of polygonal curves using B-Splines.

    Note that the resulting curve is always within the convex hull of the
    original polygon. Circular polygons stay closed after subdivision.

    Parameters
    ----------
    coords : (N, 2) array
        Coordinate array.
    degree : {1, 2, 3, 4, 5, 6, 7}, optional
        Degree of B-Spline. Default is 2.
    preserve_ends : bool, optional
        Preserve first and last coordinate of non-circular polygon. Default
        is False.

    Returns
    -------
    coords : (M, 2) array
        Subdivided coordinate array.

    References
    ----------
    .. [1] http://mrl.nyu.edu/publications/subdiv-course2000/coursenotes00.pdf
    """
    if degree not in _SUBDIVISION_MASKS:
        raise ValueError("Invalid B-Spline degree. Only degree 1 - 7 is "
                         "supported.")

    circular = cp.all(coords[0, :] == coords[-1, :])
    method = 'valid'

    if circular:
        # remove last coordinate because of wrapping
        coords = coords[:-1, :]
        # circular convolution by wrapping boundaries
        method = 'same'

    mask_even, mask_odd = _SUBDIVISION_MASKS[degree]
    # divide by total weight
    float_dtype = coords.dtype if coords.dtype.kind == 'f' else cp.float64
    mask_even = cp.array(mask_even, float_dtype) / (2 ** degree)
    mask_odd = cp.array(mask_odd, float_dtype) / (2 ** degree)

    even = signal.convolve2d(coords.T, cp.atleast_2d(mask_even),
                             mode=method, boundary='wrap')
    odd = signal.convolve2d(coords.T, cp.atleast_2d(mask_odd),
                            mode=method, boundary='wrap')

    out = cp.empty((even.shape[1] + odd.shape[1], 2), dtype=float_dtype)
    out[1::2] = even.T
    out[::2] = odd.T

    if circular:
        # close polygon
        out = cp.vstack([out, out[0, :]])

    if preserve_ends and not circular:
        out = cp.vstack([coords[0, :], out, coords[-1, :]])

    return out
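# Usage sketch: one round of degree-2 B-spline subdivision of a closed
# square. Closed polygons (first row == last row) stay closed.
import cupy as cp

square = cp.asarray([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]])
smoothed = subdivide_polygon(square, degree=2)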
def mi_model_1d_gpu_gd(x, y, biascorrect=False, demeaned=False):
    """Mutual information between a Gaussian and a discrete variable in bits.

    This method is based on ANOVA style model comparison.
    I = mi_model_gd(x,y) returns the MI between the (possibly
    multidimensional) Gaussian variable x and the discrete variable y.

    Parameters
    ----------
    x, y : array_like
        Gaussian arrays of shape (n_epochs,) or (n_dimensions, n_epochs). y
        must be an array of integers
    biascorrect : bool | False
        Specifies whether bias correction should be applied to the estimated
        MI
    demeaned : bool | False
        Specifies whether the input data already has zero mean (true if it
        has been copula-normalized)

    Returns
    -------
    i : float
        Information shared by x and y (in bits)
    """
    # Converting to cupy array
    # x, y = cp.array(x), cp.array(y)
    x, y = cp.atleast_2d(x), cp.squeeze(y)
    if x.ndim > 2:
        raise ValueError("x must be at most 2d")
    if y.ndim > 1:
        raise ValueError("only univariate discrete variables supported")
    if not cp.issubdtype(y.dtype, cp.integer):
        raise ValueError("y should be an integer array")

    nvarx, ntrl = x.shape
    ym = cp.unique(y)

    if y.size != ntrl:
        raise ValueError("number of trials do not match")

    if not demeaned:
        x = x - x.mean(axis=1)[:, cp.newaxis]

    # class-conditional entropies
    ntrl_y = cp.zeros(len(ym))
    hcond = cp.zeros(len(ym))
    for n_yi, yi in enumerate(ym):
        idx = y == yi
        xm = x[:, idx]
        ntrl_y[n_yi] = xm.shape[1]
        xm = xm - xm.mean(axis=1)[:, cp.newaxis]
        cm = cp.dot(xm, xm.T) / float(ntrl_y[n_yi] - 1)
        chcm = cp.linalg.cholesky(cm)
        hcond[n_yi] = cp.sum(cp.log(cp.diagonal(chcm)))

    # class weights
    w = ntrl_y / float(ntrl)

    # unconditional entropy from unconditional Gaussian fit
    cx = cp.dot(x, x.T) / float(ntrl - 1)
    chc = cp.linalg.cholesky(cx)
    hunc = cp.sum(cp.log(cp.diagonal(chc)))  # + c*nvarx

    ln2 = cp.log(2)
    if biascorrect:
        vars = cp.arange(1, nvarx + 1)

        # cp.float is not a valid dtype; use cp.float64 (same behavior)
        psiterms = psi((ntrl - vars).astype(cp.float64) / 2.) / 2.
        dterm = (ln2 - cp.log(float(ntrl - 1))) / 2.
        hunc = hunc - nvarx * dterm - psiterms.sum()

        dterm = (ln2 - cp.log((ntrl_y - 1).astype(cp.float64))) / 2.0
        psiterms = cp.zeros(len(ym))
        for vi in vars:
            idx = ntrl_y - vi
            psiterms = psiterms + psi(idx.astype(cp.float64) / 2.)
        hcond = hcond - nvarx * dterm - (psiterms / 2.)

    # MI in bits
    i = (hunc - cp.sum(w * hcond)) / ln2
    return i
def cmi_1d_gpu_ggg(x, y, z, biascorrect=True, demeaned=False):
    """Conditional MI between two Gaussian variables conditioned on a third.

    I = cmi_ggg(x,y,z) returns the CMI between two (possibly
    multidimensional) Gaussian variables, x and y, conditioned on a third,
    z, with bias correction.

    Parameters
    ----------
    x, y, z : array_like
        Gaussians arrays of shape (n_epochs,) or (n_dimensions, n_epochs).
    biascorrect : bool | True
        Specifies whether bias correction should be applied to the estimated
        MI
    demeaned : bool | False
        Specifies whether the input data already has zero mean (true if it
        has been copula-normalized)

    Returns
    -------
    i : float
        Information shared by x and y conditioned by z (in bits)
    """
    x, y, z = cp.atleast_2d(x), cp.atleast_2d(y), cp.atleast_2d(z)
    if x.ndim > 2 or y.ndim > 2 or z.ndim > 2:
        raise ValueError("x, y and z must be at most 2d")

    ntrl = x.shape[1]
    nvarx = x.shape[0]
    nvary = y.shape[0]
    nvarz = z.shape[0]
    nvaryz = nvary + nvarz
    nvarxy = nvarx + nvary
    nvarxz = nvarx + nvarz
    nvarxyz = nvarx + nvaryz

    if y.shape[1] != ntrl or z.shape[1] != ntrl:
        raise ValueError("number of trials do not match")

    # joint variable
    xyz = cp.vstack((x, y, z))
    if not demeaned:
        xyz = xyz - xyz.mean(axis=1)[:, cp.newaxis]
    cxyz = cp.dot(xyz, xyz.T) / float(ntrl - 1)

    # submatrices of joint covariance
    cz = cxyz[nvarxy:, nvarxy:]
    cyz = cxyz[nvarx:, nvarx:]
    cxz = cp.zeros((nvarxz, nvarxz))
    cxz[:nvarx, :nvarx] = cxyz[:nvarx, :nvarx]
    cxz[:nvarx, nvarx:] = cxyz[:nvarx, nvarxy:]
    cxz[nvarx:, :nvarx] = cxyz[nvarxy:, :nvarx]
    cxz[nvarx:, nvarx:] = cxyz[nvarxy:, nvarxy:]

    chcz = cp.linalg.cholesky(cz)
    chcxz = cp.linalg.cholesky(cxz)
    chcyz = cp.linalg.cholesky(cyz)
    chcxyz = cp.linalg.cholesky(cxyz)

    # entropies in nats
    # normalizations cancel for cmi
    hz = cp.sum(cp.log(cp.diagonal(chcz)))
    hxz = cp.sum(cp.log(cp.diagonal(chcxz)))
    hyz = cp.sum(cp.log(cp.diagonal(chcyz)))
    hxyz = cp.sum(cp.log(cp.diagonal(chcxyz)))

    ln2 = cp.log(2)
    if biascorrect:
        # cp.float is not a valid dtype; use cp.float64 (same behavior)
        psiterms = psi(
            (ntrl - cp.arange(1, nvarxyz + 1)).astype(cp.float64) / 2.) / 2.
        dterm = (ln2 - cp.log(ntrl - 1.)) / 2.
        hz = hz - nvarz * dterm - psiterms[:nvarz].sum()
        hxz = hxz - nvarxz * dterm - psiterms[:nvarxz].sum()
        hyz = hyz - nvaryz * dterm - psiterms[:nvaryz].sum()
        hxyz = hxyz - nvarxyz * dterm - psiterms[:nvarxyz].sum()

    # MI in bits
    i = (hxz + hyz - hxyz - hz) / ln2
    return i
def mi_1d_gpu_gg(x, y, biascorrect=True, demeaned=False):
    """Mutual information (MI) between two Gaussian variables in bits.

    This is the GPU variant of the mi_1d_gg function, using CuPy.

    I = mi_gg(x,y) returns the MI between two (possibly multidimensional)
    Gaussian variables, x and y, with bias correction.

    Parameters
    ----------
    x, y : array_like
        Gaussian arrays of shape (n_epochs,) or (n_dimensions, n_epochs)
    biascorrect : bool | True
        Specifies whether bias correction should be applied to the estimated
        MI
    demeaned : bool | False
        Specifies whether the input data already has zero mean (true if it
        has been copula-normalized)

    Returns
    -------
    i : float
        Information shared by x and y (in bits)
    """
    x, y = cp.atleast_2d(x), cp.atleast_2d(y)
    if (x.ndim > 2) or (y.ndim > 2):
        raise ValueError("x and y must be at most 2d")

    nvarx, ntrl = x.shape
    nvary = y.shape[0]
    nvarxy = nvarx + nvary

    if y.shape[1] != ntrl:
        raise ValueError("number of trials do not match")

    # joint variable
    xy = cp.vstack((x, y))
    if not demeaned:
        xy = xy - xy.mean(axis=1)[:, cp.newaxis]
    cxy = cp.dot(xy, xy.T) / float(ntrl - 1)

    # submatrices of joint covariance
    cx = cxy[:nvarx, :nvarx]
    cy = cxy[nvarx:, nvarx:]

    chcxy = cp.linalg.cholesky(cxy)
    chcx = cp.linalg.cholesky(cx)
    chcy = cp.linalg.cholesky(cy)

    # entropies in nats
    # normalizations cancel for mutual information
    hx = cp.sum(cp.log(cp.diagonal(chcx)))
    hy = cp.sum(cp.log(cp.diagonal(chcy)))
    hxy = cp.sum(cp.log(cp.diagonal(chcxy)))

    ln2 = cp.log(2)
    if biascorrect:
        # cp.float is not a valid dtype; use cp.float64 (same behavior)
        psiterms = psi(
            (ntrl - cp.arange(1, nvarxy + 1)).astype(cp.float64) / 2.) / 2.
        dterm = (ln2 - cp.log(ntrl - 1.)) / 2.
        hx = hx - nvarx * dterm - psiterms[:nvarx].sum()
        hy = hy - nvary * dterm - psiterms[:nvary].sum()
        hxy = hxy - nvarxy * dterm - psiterms[:nvarxy].sum()

    # MI in bits
    i = (hx + hy - hxy) / ln2
    return i
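# Usage sketch: MI between two correlated 1-D Gaussian variables. Inputs of
# shape (n_epochs,) are promoted to (1, n_epochs) by cp.atleast_2d; bias
# correction is skipped here to keep the example self-contained.
import cupy as cp

rng = cp.random.RandomState(0)
x = rng.standard_normal(10000)
y = x + 0.5 * rng.standard_normal(10000)
i = mi_1d_gpu_gg(x, y, biascorrect=False)   # > 0 bits for correlated inputs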
def estimate_local_log_joint_mark_intensity(decoding_marks,
                                            encoding_marks,
                                            mark_std,
                                            occupancy,
                                            mean_rate,
                                            decoding_position=None,
                                            encoding_position=None,
                                            position_std=None,
                                            max_mark_value=6000,
                                            set_diag_zero=False,
                                            position_distance=None,
                                            sample_weights=None):
    """
    Parameters
    ----------
    decoding_marks : ndarray, shape (n_decoding_spikes, n_features)
    encoding_marks : ndarray, shape (n_encoding_spikes, n_features)
    mark_std : float or ndarray, shape (n_features,)
    occupancy : ndarray, shape (n_position_bins,)
    mean_rate : float
    decoding_position : ndarray, shape (n_decoding_spikes, n_position_dims)
    encoding_position : ndarray, shape (n_encoding_spikes, n_position_dims)
    position_std : float
    max_mark_value : int
    set_diag_zero : bool
    position_distance : ndarray, optional
        Precomputed position distances; computed from the positions if None.
    sample_weights : ndarray, optional

    Returns
    -------
    log_joint_mark_intensity : ndarray, shape (n_decoding_spikes,)
    """
    n_encoding_spikes, n_marks = encoding_marks.shape
    n_decoding_spikes = decoding_marks.shape[0]

    if sample_weights is None:
        sample_weights = cp.ones((1, n_decoding_spikes), dtype=cp.float32)
        denominator = n_encoding_spikes
    else:
        sample_weights = cp.atleast_2d(sample_weights)
        denominator = cp.sum(sample_weights)

    mark_distance = cp.ones(
        (n_decoding_spikes, n_encoding_spikes),
        dtype=cp.float32) * sample_weights

    for mark_ind in range(n_marks):
        mark_distance *= normal_pdf_integer_lookup(
            cp.expand_dims(decoding_marks[:, mark_ind], axis=1),
            cp.expand_dims(encoding_marks[:, mark_ind], axis=0),
            std=mark_std,
            max_value=max_mark_value)

    if set_diag_zero:
        diag_ind = cp.diag_indices_from(mark_distance)
        mark_distance[diag_ind] = 0.0

    if position_distance is None:
        position_distance = estimate_position_distance(
            decoding_position, encoding_position,
            position_std).astype(cp.float32)

    return cp.asnumpy(
        estimate_log_intensity(
            cp.sum(mark_distance * position_distance.T, axis=1) / denominator,
            occupancy,
            mean_rate))
def __init__(self, arg1, shape=None, dtype=None, copy=False):
    if shape is not None and len(shape) != 2:
        raise ValueError(
            'Only two-dimensional sparse arrays are supported.')

    if base.issparse(arg1):
        x = arg1.asformat(self.format)
        data = x.data
        row = x.row
        col = x.col

        if arg1.format != self.format:
            # When formats are different, all arrays are already copied
            copy = False

        if shape is None:
            shape = arg1.shape

        self.has_canonical_format = x.has_canonical_format

    elif _util.isshape(arg1):
        m, n = arg1
        m, n = int(m), int(n)
        data = cupy.zeros(0, dtype if dtype else 'd')
        row = cupy.zeros(0, dtype='i')
        col = cupy.zeros(0, dtype='i')
        # shape and copy argument is ignored
        shape = (m, n)
        copy = False
        self.has_canonical_format = True

    elif _scipy_available and scipy.sparse.issparse(arg1):
        # Convert scipy.sparse to cupyx.scipy.sparse
        x = arg1.tocoo()
        data = cupy.array(x.data)
        row = cupy.array(x.row, dtype='i')
        col = cupy.array(x.col, dtype='i')
        copy = False

        if shape is None:
            shape = arg1.shape

        self.has_canonical_format = x.has_canonical_format

    elif isinstance(arg1, tuple) and len(arg1) == 2:
        try:
            data, (row, col) = arg1
        except (TypeError, ValueError):
            raise TypeError('invalid input format')

        if not (base.isdense(data) and data.ndim == 1 and
                base.isdense(row) and row.ndim == 1 and
                base.isdense(col) and col.ndim == 1):
            raise ValueError('row, column, and data arrays must be 1-D')
        if not (len(data) == len(row) == len(col)):
            raise ValueError(
                'row, column, and data array must all be the same length')

        self.has_canonical_format = False

    elif base.isdense(arg1):
        if arg1.ndim > 2:
            raise TypeError('expected dimension <= 2 array or matrix')
        dense = cupy.atleast_2d(arg1)
        row, col = dense.nonzero()
        data = dense[row, col]
        shape = dense.shape

        self.has_canonical_format = True

    else:
        raise TypeError('invalid input format')

    if dtype is None:
        dtype = data.dtype
    else:
        dtype = numpy.dtype(dtype)

    if dtype != 'f' and dtype != 'd' and dtype != 'F' and dtype != 'D':
        raise ValueError('Only float32, float64, complex64 and complex128'
                         ' are supported')

    data = data.astype(dtype, copy=copy)
    row = row.astype('i', copy=copy)
    col = col.astype('i', copy=copy)

    if shape is None:
        if len(row) == 0 or len(col) == 0:
            raise ValueError(
                'cannot infer dimensions from zero sized index arrays')
        shape = (int(row.max()) + 1, int(col.max()) + 1)

    if len(data) > 0:
        if row.max() >= shape[0]:
            raise ValueError('row index exceeds matrix dimensions')
        if col.max() >= shape[1]:
            raise ValueError('column index exceeds matrix dimensions')
        if row.min() < 0:
            raise ValueError('negative row index found')
        if col.min() < 0:
            raise ValueError('negative column index found')

    sparse_data._data_matrix.__init__(self, data)

    self.row = row
    self.col = col
    if not _util.isshape(shape):
        raise ValueError('invalid shape (must be a 2-tuple of int)')
    self._shape = int(shape[0]), int(shape[1])
def nullspace_gpu(A, tol=1e-13):
    A = cp.atleast_2d(A)
    u, s, vh = svd_gpu(A)
    nnz = (s >= tol).sum()
    ns = vh[nnz:].conj().T
    return ns
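# Usage sketch: the nullspace of a rank-1 matrix. `svd_gpu` is assumed to be
# a helper defined alongside this function (e.g. wrapping cp.linalg.svd).
import cupy as cp

A = cp.asarray([[1., 2.], [2., 4.]])   # rank 1
ns = nullspace_gpu(A)                  # shape (2, 1); A @ ns is ~ 0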