def _masked_column_median(arr, masked_value):
    """Compute the median of each column in the 2D array arr, ignoring any
    instances of masked_value"""
    mask = _get_mask(arr, masked_value)
    if arr.size == 0:
        return cp.full(arr.shape[1], cp.nan)
    arr_sorted = arr.copy()
    if not cp.isnan(masked_value):
        # If nan is not the missing value, any column with nans should
        # have a median of nan
        nan_cols = cp.any(cp.isnan(arr), axis=0)
        arr_sorted[mask] = cp.nan
    else:
        nan_cols = cp.full(arr.shape[1], False)

    # nans are always sorted to the end of the array
    arr_sorted = cp.sort(arr_sorted, axis=0)
    count_missing_values = mask.sum(axis=0)

    # Ignore missing values in determining the "halfway" index of the
    # sorted array
    n_elems = arr.shape[0] - count_missing_values

    # If no elements remain after removing the missing values, the median
    # for that column is nan
    nan_cols = cp.logical_or(nan_cols, n_elems <= 0)

    col_index = cp.arange(arr_sorted.shape[1])
    median = (arr_sorted[cp.floor_divide(n_elems - 1, 2), col_index] +
              arr_sorted[cp.floor_divide(n_elems, 2), col_index]) / 2
    median[nan_cols] = cp.nan
    return median

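# Hedged usage sketch for _masked_column_median (not part of the original
# source): assumes cupy is imported as cp and that _get_mask(arr, v) returns
# a boolean mask of entries equal to v, treating NaN as equal to NaN.
def _demo_masked_column_median():
    demo = cp.asarray([[1.0, 2.0], [cp.nan, 4.0], [5.0, 6.0]])
    med = _masked_column_median(demo, cp.nan)
    # Column 0 ignores its NaN -> median of [1, 5] is 3; column 1 -> 4
    assert cp.allclose(med, cp.asarray([3.0, 4.0]))
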
def corr_pairwise(x, y, return_pearson=False):
    """Covariance and Pearson product-moment correlation coefficients on the
    GPU for paired data with tolerance of NaNs. Currently only supports rows
    as samples and columns as observations.

    Parameters
    ----------
    x : array_like
        The baseline array of values.
    y : array_like
        The comparison array of values.

    Returns
    -------
    corr : cupy ndarray
        Array of correlation values
    """

    def _cov_pairwise(x1, x2, factor):
        return cupy.nansum(x1 * x2, axis=1, keepdims=True) \
            * cupy.true_divide(1, factor)

    # Coerce arrays into 2D format and set dtype
    dtype = cupy.result_type(x, y, cupy.float64)
    x = cupy.asarray(x, dtype=dtype)
    y = cupy.asarray(y, dtype=dtype)

    assert x.shape == y.shape
    if x.ndim < 2:
        x = x[None, :]
        y = y[None, :]
    n_samples, n_obs = x.shape

    # Calculate degrees of freedom for each sample pair
    ddof = 1
    nan_count = (cupy.isnan(x) | cupy.isnan(y)).sum(axis=1, keepdims=True)
    fact = n_obs - nan_count - ddof

    # Mean normalize
    x -= cupy.nanmean(x, axis=1, keepdims=True)
    y -= cupy.nanmean(y, axis=1, keepdims=True)

    # Calculate covariance matrix
    corr = _cov_pairwise(x, y, fact)

    if return_pearson:
        x_corr = _cov_pairwise(x, x, fact)
        y_corr = _cov_pairwise(y, y, fact)
        auto_corr = cupy.sqrt(x_corr) * cupy.sqrt(y_corr)
        corr = corr / auto_corr
        corr = cupy.clip(corr.real, -1, 1, out=corr.real)
        return corr

    return corr.squeeze()

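# Hedged usage sketch (not from the original source): with y = 2 * x and the
# NaN at the same observation in both rows, the NaN-tolerant Pearson r is 1.
def _demo_corr_pairwise():
    x = cupy.asarray([1.0, 2.0, cupy.nan, 4.0])
    y = cupy.asarray([2.0, 4.0, cupy.nan, 8.0])
    r = corr_pairwise(x, y, return_pearson=True)
    assert cupy.allclose(r, 1.0)
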
def select_valid_oof(y, oof):
    if isinstance(oof, cupy.ndarray):
        if len(oof.shape) == 1:
            idx = cupy.argwhere(~cupy.isnan(oof[:])).ravel()
        elif len(oof.shape) == 2:
            idx = cupy.argwhere(~cupy.isnan(oof[:, 0])).ravel()
        elif len(oof.shape) == 3:
            idx = cupy.argwhere(~cupy.isnan(oof[:, 0, 0])).ravel()
        else:
            raise ValueError(f'Unsupported shape:{oof.shape}')
        return y.iloc[idx] if hasattr(y, 'iloc') else y[idx], oof[idx]
    else:
        return ToolBox.select_valid_oof(y, oof)

def gpu_resize(dPhi, dAmp, src_x, src_y, nx, ny):
    ratio_x = nx / src_x
    ratio_y = ny / src_y
    # print(ratio_x, ratio_y)
    dPhi[cupy.isnan(dPhi)] = 0
    dPhi[cupy.isinf(dPhi)] = 0
    dAmp[cupy.isnan(dAmp)] = 1
    dAmp[cupy.isinf(dAmp)] = 1
    dAmp[cupy.equal(dAmp, 0.0)] = 0.01
    dAmp = cupy.absolute(dAmp)
    dPhi = cupyx.scipy.ndimage.zoom(dPhi, (ratio_y, ratio_x))
    dAmp = cupyx.scipy.ndimage.zoom(dAmp, (ratio_y, ratio_x))
    dField = cupy.log(dAmp) + 1j * dPhi
    return dField

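# Hedged usage sketch (not from the original source): upsample a 2x2
# phase/amplitude pair to 4x4. NaNs in the phase are zeroed before the zoom,
# so the resized complex field contains no NaNs.
def _demo_gpu_resize():
    phi = cupy.asarray([[0.0, cupy.nan], [0.0, 0.0]])
    amp = cupy.ones((2, 2))
    field = gpu_resize(phi, amp, src_x=2, src_y=2, nx=4, ny=4)
    assert field.shape == (4, 4)
    assert not cupy.any(cupy.isnan(field))
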
def test_min_max_axis_extremes(sparse_extremes, axis, ignore_nan):
    X_sparse_np, X_sparse = sparse_extremes

    cu_min, cu_max = cu_min_max_axis(X_sparse, axis=axis,
                                     ignore_nan=ignore_nan)
    sk_min, sk_max = sk_min_max_axis(X_sparse_np, axis=axis,
                                     ignore_nan=ignore_nan)

    if axis is not None:
        assert_allclose(cu_min, sk_min)
        assert_allclose(cu_max, sk_max)
    else:
        assert cu_min == sk_min or (cp.isnan(cu_min) and np.isnan(sk_min))
        assert cu_max == sk_max or (cp.isnan(cu_max) and np.isnan(sk_max))

def test_erfcinv_behavior(self, dtype):
    a = cupy.empty((1,), dtype=dtype)

    a[:] = 2.0 + 1E-6
    a = cupyx.scipy.special.erfcinv(a)
    assert cupy.isnan(a)
    a[:] = 0.0 - 1E-6
    a = cupyx.scipy.special.erfcinv(a)
    assert cupy.isnan(a)
    a[:] = 0.0
    a = cupyx.scipy.special.erfcinv(a)
    assert numpy.isposinf(cupy.asnumpy(a))
    a[:] = 2.0
    a = cupyx.scipy.special.erfcinv(a)
    assert numpy.isneginf(cupy.asnumpy(a))

def __init__(self, env_arr, seed=300):
    """Initialize environment.

    Pad env with NaN along every axis. This helps us find the neighbours
    of cells even when they lie on the edges. env should have 3 dimensions
    (XYZ).

    {_agents_positions} is an array of the current XYZ position of every
    agent.
    {agents_state} is an array of agent states. 1 - alive, 0 - dead
    {is_available_env} - env-like boolean array, where True indicates a
    free cell

    :param env_arr: array of the environment where agents are born, move,
        and die.
    """
    self._raw_env = env_arr
    self.env = cp.pad(cp.array(env_arr).astype(cp.float64), 1,
                      mode='constant', constant_values=cp.nan)
    self.agents_state = None
    self._agents_positions = None
    self._agents_positions_all_time = []

    self.is_available_env = ~cp.isnan(self.env)

    self._rng = np.random.default_rng(seed)
    np.random.seed(seed)
    cp.random.seed(seed)

def indneutralize(y, X):
    # Fit with ordinary least squares
    # Make sure both have the same leading dimension
    if X.shape[0] != y.shape[0]:
        X = X.T
    # If they still do not match, raise an error
    if X.shape[0] != y.shape[0]:
        raise ValueError("X and y have mismatched dimensions")
    result = cp.zeros(len(y))
    # index = cp.arange(len(y))
    bool_y = cp.isnan(y) | cp.isinf(y)
    # Rows of X whose elements do not sum to 1
    bool_X = ~(cp.abs(cp.sum(X, axis=1) - 1) < 0.000001)
    bool_na = bool_y | bool_X
    # used_index = index[~bool_na]
    used_y = y[~bool_na]
    used_X = X[~bool_na]
    b = cp.linalg.inv(used_X.T.dot(used_X)).dot(used_X.T).dot(used_y)
    pred_used_y = used_X.dot(b)
    result[~bool_na] = used_y - pred_used_y
    result[bool_na] = cp.nan
    return result

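# Hedged usage sketch (not from the original source): two industries encoded
# one-hot; the neutralized result is each value minus its industry mean.
def _demo_indneutralize():
    y = cp.asarray([1.0, 3.0, 10.0, 14.0])
    X = cp.asarray([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
    res = indneutralize(y, X)
    # Industry means are 2 and 12, so the residuals are [-1, 1, -2, 2]
    assert cp.allclose(res, cp.asarray([-1.0, 1.0, -2.0, 2.0]))
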
def dumptomtrx(self):
    amplifyfactor = 5
    tilefactor = 0
    inputdata = []
    outputdata = []
    for x in range(len(self.Board2Drops.getBoardslist())):
        inputdata.append(self.Board2Drops.getBoardbyIndex(x))
        droppoints = self.Board2Drops.getContainbyIndex(x)
        windroppoints = droppoints[0]
        losedroppoints = droppoints[1]
        winboard = np.zeros(64)
        loseboard = np.zeros(64)
        for droppoint in windroppoints:
            winboard[droppoint[0] * 8 + droppoint[1]] += 1
        for droppoint in losedroppoints:
            loseboard[droppoint[0] * 8 + droppoint[1]] += 1
        # Normalized win rate per square, scaled by amplifyfactor; squares
        # with no drops produce 0/0 = NaN and are reset to tilefactor below
        finalboard = ((winboard - loseboard) / (winboard + loseboard)) * amplifyfactor
        where_are_NaNs = np.isnan(finalboard)
        finalboard[where_are_NaNs] = tilefactor
        outputdata.append(finalboard.tolist())
    InputData = io.RAWWriter()
    io.writeAMatrix(np.array(tuple(inputdata)), InputData)
    InputData.write('in_policy.mtrx')
    OutputData = io.RAWWriter()
    io.writeAMatrix(np.array(tuple(outputdata)), OutputData)
    OutputData.write('out_policy.mtrx')

def _check_mask(self, mask=None, require_mask=False):
    """Checks that the mask is:

        * The same shape as the data
        * A numpy ndarray (or subtype)
        * Does not contain any NaN entries

    Parameters
    ----------
    require_mask : bool (default : False)
    """
    # Check that there is a mask if required
    _use_mask = mask
    if _use_mask is None:
        _use_mask = self.mask
    if require_mask and _use_mask is None:
        raise ValueError("Expected a mask, but got nothing!")
    # If we have a mask, check it
    if _use_mask is not None:
        # Check that the mask inherits from an ndarray
        if not isinstance(_use_mask, np.ndarray):
            raise TypeError("mask is of type %s, " % type(_use_mask) +
                            "must be numpy.ndarray")
        # Check if there is a NaN value in the mask
        if np.isnan(np.sum(_use_mask)):
            raise ValueError("NaNs in the data mask")
        # Check that the mask and the values have the same shape
        if self.values.shape != _use_mask.shape:
            raise ValueError("shape mismatch: dataframe.values.shape = %s"
                             % str(self.values.shape) +
                             " but mask.shape = %s, " % str(_use_mask.shape) +
                             "must be identical")

def HaversineLocal(busMatrix, lineMatrix, haversine=True):
    MatrizOnibus = cp.copy(busMatrix)
    MatrizLinhas = cp.copy(lineMatrix)

    MatrizLinhas = cp.dsplit(MatrizLinhas, 2)
    MatrizOnibus = cp.dsplit(MatrizOnibus, 2)

    infVector = cp.squeeze(cp.sum(cp.isnan(MatrizLinhas[0]), axis=1), axis=-1)

    MatrizLinhas[0] = cp.expand_dims(MatrizLinhas[0], axis=-1)
    MatrizLinhas[1] = cp.expand_dims(MatrizLinhas[1], axis=-1)
    MatrizOnibus[0] = cp.expand_dims(MatrizOnibus[0], axis=-1)
    MatrizOnibus[1] = cp.expand_dims(MatrizOnibus[1], axis=-1)

    MatrizOnibus[0] *= cp.pi / 180
    MatrizOnibus[1] *= cp.pi / 180
    MatrizLinhas[1] = cp.transpose(MatrizLinhas[1], [2, 3, 0, 1]) * cp.pi / 180
    MatrizLinhas[0] = cp.transpose(MatrizLinhas[0], [2, 3, 0, 1]) * cp.pi / 180

    # Haversine or Euclidean, based on <haversine>
    if haversine:
        results = 1000 * 2 * 6371.0088 * cp.arcsin(
            cp.sqrt(
                cp.sin((MatrizOnibus[0] - MatrizLinhas[0]) * 0.5) ** 2 +
                cp.cos(MatrizOnibus[0]) * cp.cos(MatrizLinhas[0]) *
                cp.sin((MatrizOnibus[1] - MatrizLinhas[1]) * 0.5) ** 2
            ))
    else:
        results = cp.sqrt((MatrizOnibus[0] - MatrizLinhas[0]) ** 2 +
                          (MatrizOnibus[1] - MatrizLinhas[1]) ** 2)
    return results, infVector

def _index_or_values_interpolation(column, index=None):
    """
    Interpolate over a float column. Assumes a linear interpolation
    strategy using the index of the data to denote spacing of the x
    values. For example the data and index [1.0, NaN, 4.0], [1, 3, 4]
    would result in [1.0, 3.0, 4.0].
    """
    # figure out where the nans are
    mask = cp.isnan(column)

    # trivial cases, all nan or no nans
    num_nan = mask.sum()
    if num_nan == 0 or num_nan == len(column):
        return column

    to_interp = Frame(data={None: column}, index=index)
    known_x_and_y = to_interp._apply_boolean_mask(as_column(~mask))

    known_x = known_x_and_y._index._column.values
    known_y = known_x_and_y._data.columns[0].values

    result = cp.interp(to_interp._index.values, known_x, known_y)

    # find the first nan
    first_nan_idx = (mask == 0).argmax().item()
    result[:first_nan_idx] = np.nan
    return result

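# Hedged sketch of the same idea in plain CuPy, without the cudf Frame
# machinery (illustrative only): interpolate the NaNs of a column against
# its index, reproducing the docstring example.
def _interp_over_nans(values, index):
    mask = cp.isnan(values)
    return cp.interp(index, index[~mask], values[~mask])

# _interp_over_nans(cp.asarray([1.0, cp.nan, 4.0]),
#                   cp.asarray([1.0, 3.0, 4.0]))  # -> array([1., 3., 4.])
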
def test_percentile_memory_access(self, dtype):
    # Create an allocator that guarantees the array allocated in the
    # cupy.percentile call will be followed by a NaN
    original_allocator = cuda.get_allocator()

    def controlled_allocator(size):
        memptr = original_allocator(size)
        base_size = memptr.mem.size
        assert base_size % 512 == 0
        item_size = dtype().itemsize
        shape = (base_size // item_size,)
        x = cupy.ndarray(memptr=memptr, shape=shape, dtype=dtype)
        x.fill(cupy.nan)
        return memptr

    # Check that percentile still returns non-NaN results
    a = testing.shaped_random((5,), cupy, dtype)
    q = cupy.array((0, 100), dtype=dtype)

    cuda.set_allocator(controlled_allocator)
    try:
        percentiles = cupy.percentile(a, q, axis=None, method='linear')
    finally:
        cuda.set_allocator(original_allocator)

    assert not cupy.any(cupy.isnan(percentiles))

def sinhm(arr):
    """Hyperbolic Sine calculation for a given nXn matrix.

    Args:
        arr (cupy.ndarray): Square matrix with dimension nXn.

    Returns:
        (cupy.ndarray): Hyperbolic sine of the given square matrix.

    .. seealso:: :func:`scipy.linalg.sinhm`
    """
    arr = cupy.array(arr)

    # Check whether the input is a 2D matrix
    if len(arr.shape) != 2:
        raise ValueError("Dimensions of matrix should be 2")
    # Check whether the input matrix is square
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("Input matrix should be a square matrix")
    # Check whether any input matrix element is nan (the original cumsum
    # index only inspected the first 2n flattened elements)
    if cupy.isnan(arr).any():
        raise ValueError("Input matrix elements cannot be nan")
    # Check whether any input matrix element is infinite
    if cupy.isinf(arr).any():
        raise ValueError("Input matrix elements cannot be infinity")

    # Elementwise hyperbolic sine via the exponential identity
    return 0.5 * (cupy.exp(arr) - cupy.exp(-1 * arr))

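# Hedged check (not from the original source): for a diagonal matrix the
# elementwise exponential identity above agrees with the matrix hyperbolic
# sine, diag(sinh(a_ii)).
def _demo_sinhm():
    a = cupy.diag(cupy.asarray([0.0, 1.0]))
    expected = cupy.diag(cupy.sinh(cupy.asarray([0.0, 1.0])))
    assert cupy.allclose(sinhm(a), expected)
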
def jitter(raster, window):
    ntrials, nbins = raster.shape

    # if needed, pad to be divisible by window
    if nbins % window:
        pad = cp.zeros((ntrials, -nbins % window))
        raster = cp.concatenate([raster, pad], axis=1)

    nbins_rounded = raster.shape[1]
    n_jitter_bins = nbins_rounded // window

    # get psth
    psth = raster.mean(axis=0)

    # bin over window and sum
    raster_binned = cp.reshape(raster, (ntrials, window, n_jitter_bins)).sum(axis=1)
    psth_binned = cp.reshape(psth, (window, n_jitter_bins)).sum(axis=0)

    # determine correction
    correction = raster_binned / cp.expand_dims(psth_binned, 0)
    correction = cp.tile(cp.expand_dims(correction, 1), [1, window, 1])
    correction = cp.reshape(correction, (ntrials, nbins_rounded))

    # apply correction
    raster_jittered = cp.expand_dims(psth, 0) * correction

    # trim off padding
    raster_jittered = raster_jittered[:, :nbins]
    raster_jittered[cp.isnan(raster_jittered)] = 0
    return raster_jittered

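# Hedged usage sketch (not from the original source): when every trial is
# identical, each trial matches the PSTH within every jitter window, so the
# correction is a no-op.
def _demo_jitter():
    raster = cp.tile(cp.asarray([[0.0, 1.0, 2.0, 3.0]]), (3, 1))
    assert cp.allclose(jitter(raster, window=2), raster)
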
def _min_or_max_axis(X, axis, min_or_max):
    N = X.shape[axis]
    if N == 0:
        raise ValueError("zero-size array to reduction operation")
    M = X.shape[1 - axis]
    mat = X.tocsc() if axis == 0 else X.tocsr()
    mat.sum_duplicates()

    major_index, value = _minor_reduce(mat, min_or_max)

    not_full = np.diff(mat.indptr)[major_index] < N

    if 'min' in min_or_max:
        fminmax = np.fmin
    else:
        fminmax = np.fmax

    is_nan = np.isnan(value)
    # Rows/cols that are not fully populated have implicit zeros, which
    # must take part in the reduction
    value[not_full] = fminmax(value[not_full], 0)

    if 'nan' not in min_or_max:
        # fmin/fmax ignore NaNs; restore them when not in nan-ignoring mode
        value[is_nan] = np.nan

    mask = value != 0
    major_index = np.compress(mask, major_index)
    value = np.compress(mask, value)

    if axis == 0:
        res = gpu_sp.coo_matrix((value, (np.zeros(len(value)), major_index)),
                                dtype=X.dtype, shape=(1, M))
    else:
        res = gpu_sp.coo_matrix((value, (major_index, np.zeros(len(value)))),
                                dtype=X.dtype, shape=(M, 1))
    return res.A.ravel()

def weighted_avg_and_std(values, axis, weights):
    """Returns the weighted average and standard deviation.

    Parameters
    ----------
    values : array
        Data array of shape (time, variables).
    axis : int
        Axis to average/std about.
    weights : array
        Weight array of shape (time, variables).

    Returns
    -------
    (average, std) : tuple of arrays
        Tuple of weighted average and standard deviation along axis.
    """
    # NaNs are zeroed in place before averaging
    values[np.isnan(values)] = 0.
    average = np.ma.average(values, axis=axis, weights=weights)
    variance = np.sum(weights * (values - np.expand_dims(average, axis)) ** 2,
                      axis=axis) / weights.sum(axis=axis)
    return (average, np.sqrt(variance))

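# Hedged worked example (not from the original source): weights [1, 3] on
# values [0, 4] give average (0*1 + 4*3)/4 = 3 and variance
# (1*(0-3)^2 + 3*(4-3)^2)/4 = 3.
def _demo_weighted_avg_and_std():
    vals = np.array([[0.0], [4.0]])
    wgts = np.array([[1.0], [3.0]])
    avg, std = weighted_avg_and_std(vals, axis=0, weights=wgts)
    assert np.isclose(avg[0], 3.0) and np.isclose(std[0], np.sqrt(3.0))
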
def calc_cc(vol1, vol2, intrad, num_bins, mask=None):
    '''Calculate CC as a function of binned radius'''
    v1v2 = np.zeros(num_bins)
    v1v1 = np.zeros(num_bins)
    v2v2 = np.zeros(num_bins)
    if mask is None:
        mask = ~(np.isnan(vol1) | np.isnan(vol2))

    scatter_add(v1v2, intrad[mask], vol1[mask] * vol2[mask])
    scatter_add(v1v1, intrad[mask], vol1[mask] ** 2)
    scatter_add(v2v2, intrad[mask], vol2[mask] ** 2)

    denr = v1v1 * v2v2
    sel = (denr > 0)
    v1v2[sel] /= np.sqrt(denr[sel])
    return v1v2

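# Hedged usage sketch (not from the original source), assuming scatter_add
# behaves like np.add.at (as cupyx.scatter_add does): proportional volumes
# give CC = 1 in every radial shell.
def _demo_calc_cc():
    vol1 = np.arange(1.0, 5.0)
    vol2 = 2.0 * vol1
    intrad = np.array([0, 0, 1, 1])
    assert np.allclose(calc_cc(vol1, vol2, intrad, num_bins=2), 1.0)
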
def transform(self, X):
    """[summary].

    Args:
        X (cupy.ndarray): [description].

    Returns:
        cupy.ndarray: [description].
    """
    check_is_fitted(self, "class_means_")
    # TODO(smly):
    # X = column_or_1d(X, warn=True)

    # Label encoding if necessary
    if self._label_encoding_uniques is not None:
        X = self._label_encoding_uniques.get_indexer(X.to_pandas())
    X = cupy.asarray(X)

    missing_mask = cupy.isnan(X)
    encode_mask = cupy.invert(missing_mask)
    unseen_mask = cupy.bitwise_xor(
        cupy.isin(X, self.classes_, invert=True), missing_mask)

    X = X.copy()
    X[unseen_mask] = cupy.max(self.classes_)

    indices = _get_index_cupy(self.classes_, X[encode_mask])

    _classes_index_list = cupy.searchsorted(self.lut_[:, 0], self.classes_)
    encoded_values = cupy.zeros(X.shape[0], dtype=cupy.float32)
    encoded_values[encode_mask] = cupy.take(
        self.lut_[:, 1], cupy.take(_classes_index_list, indices))

    encoded_values[unseen_mask] = self.default_unseen_
    return encoded_values

def calc_stc(cum, gpu=False):
    """
    Calculate STC (spatio-temporal consistency; Hanssen et al., 2008,
    Terrafirma) of a time series of displacement.
    Note that isolated pixels (which have no surrounding pixels) have nan
    of STC.

    Input:
        cum : Cumulative displacement (n_im, length, width)
        gpu : GPU flag

    Return:
        stc : STC (length, width)
    """
    if gpu:
        import cupy as xp
        cum = xp.asarray(cum)
    else:
        xp = np

    n_im, length, width = cum.shape

    ### Add a 1 pixel margin to the cum data, filled with nan
    cum1 = xp.ones((n_im, length + 2, width + 2), dtype=xp.float32) * xp.nan
    cum1[:, 1:length + 1, 1:width + 1] = cum

    ### Calc STC for the surrounding 8 pixels
    _stc = xp.ones((length, width, 8), dtype=xp.float32) * xp.nan
    pixels = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1], [2, 2]]
    ## Left Top = [0, 0], Right Bottom = [2, 2], Center = [1, 1]

    for i, pixel in enumerate(pixels):
        ### Spatial difference (surrounding pixel - center)
        d_cum = cum1[:, pixel[0]:length + pixel[0],
                     pixel[1]:width + pixel[1]] \
            - cum1[:, 1:length + 1, 1:width + 1]

        ### Temporal difference (double difference)
        dd_cum = d_cum[:-1, :, :] - d_cum[1:, :, :]

        ### STC (i.e., RMS of DD)
        sumsq_dd_cum = xp.nansum(dd_cum ** 2, axis=0)
        n_dd_cum = (xp.sum(~xp.isnan(dd_cum),
                           axis=0)).astype(xp.float32)  # number of non-nan
        n_dd_cum[n_dd_cum == 0] = xp.nan  # to avoid division by 0
        _stc[:, :, i] = xp.sqrt(sumsq_dd_cum / n_dd_cum)

    ### Strange, but some adjacent pixels can have identical time series,
    ### resulting in an STC of 0. To avoid this, replace 0 with nan.
    _stc[_stc == 0] = xp.nan

    ### Identify the minimum value as the final STC
    with warnings.catch_warnings():  ## To silence the All-NaN slice warning
        warnings.simplefilter('ignore', RuntimeWarning)
        stc = xp.nanmin(_stc, axis=2)

    if gpu:
        stc = xp.asnumpy(stc)
        del cum, cum1, _stc, d_cum, dd_cum, sumsq_dd_cum, n_dd_cum

    return stc

def test_niblack_sauvola_pathological_image():
    # For certain values, floating point error can cause
    # E(X^2) - (E(X))^2 to be negative, and taking the square root of this
    # resulted in NaNs. Here we check that these are safely caught.
    # see https://github.com/scikit-image/scikit-image/issues/3007
    value = 0.03082192 + 2.19178082e-09
    src_img = cp.full((4, 4), value).astype(cp.float64)
    assert not cp.any(cp.isnan(threshold_niblack(src_img)))

def check_not_all_nans(series):
    """
    Description: return True if the series contains at least one non-NaN
    value
    """
    if series.dtype not in CUDF_DATETIME_TYPES:
        return not bool(cp.isnan(series).all())
    return True

def eval_pdf_multi(self, x_kj, systs=None, kernel_2d=False, get=True):
    '''
    Evaluates the signal's normalized PDF at a list-like series of points.
    If CuPy is present on the system, a CUDA kernel will be used to run
    this calculation on the default GPU.
    (See: KernelDensityPDF._kdpdf1_multi)
    '''
    if systs is None:
        t_ij, h_ij, w_i = self.t_ij, self.h_ij, self.w_i
    else:
        t_ij, h_ij, w_i = systs
    x_kj = cp.asarray(x_kj)
    norm = cp.asarray(self._normalization(t_ij=t_ij, h_ij=h_ij, w_i=w_i))
    if np == cp:
        return np.asarray([
            KernelDensityPDF._kdpdf1(x_j, t_ij, h_ij, w_i)
            for x_j in x_kj
        ]) / norm
    else:
        if kernel_2d:
            # faster for fewer points, i*k memory requirements
            pdf_ki = cp.empty((x_kj.shape[0], t_ij.shape[0]))
            block_size = 32
            k_grid_size = pdf_ki.shape[0] // block_size + 1
            i_grid_size = pdf_ki.shape[1] // block_size + 1
            KernelDensityPDF._kdpdf1_ki(
                (k_grid_size, i_grid_size), (block_size, block_size),
                (x_kj, t_ij, h_ij, w_i, t_ij.shape[0], t_ij.shape[1],
                 x_kj.shape[0], pdf_ki))
            pdf_k = cp.sum(pdf_ki, axis=1)
            pdf_k = pdf_k / cp.sum(self.w_i) / norm
            return pdf_k.get() if get else pdf_k
        else:
            pdf_k = cp.empty(x_kj.shape[0])
            block_size = 64
            grid_size = x_kj.shape[0] // block_size + 1
            KernelDensityPDF._kdpdf1_k(
                (grid_size,), (block_size,),
                (x_kj, t_ij, h_ij, w_i, t_ij.shape[0], t_ij.shape[1],
                 x_kj.shape[0], pdf_k))
            pdf_k = pdf_k / cp.sum(self.w_i) / norm
            if cp.any(cp.isnan(pdf_k)):
                print('w_i sum:', cp.sum(self.w_i), 'norm:', norm)
                print('t_ij nan:', t_ij[cp.isnan(t_ij)])
                print('h_ij nan:', h_ij[cp.isnan(h_ij)])
                print('h_ij zero:', h_ij[h_ij == 0])
                raise Exception('NaN value probability in ' + self.name)
            return pdf_k.get() if get else pdf_k

def test_min_max_axis(failure_logger, sparse_random_dataset, axis,
                      ignore_nan):
    _, X, X_sparse_np, X_sparse = sparse_random_dataset
    X_sparse[0, 0] = np.nan
    X_sparse_np[0, 0] = np.nan

    cu_min, cu_max = cu_min_max_axis(X_sparse, axis=axis,
                                     ignore_nan=ignore_nan)
    sk_min, sk_max = sk_min_max_axis(X_sparse_np, axis=axis,
                                     ignore_nan=ignore_nan)

    if axis is not None:
        assert_allclose(cu_min, sk_min)
        assert_allclose(cu_max, sk_max)
    else:
        assert cu_min == sk_min or (cp.isnan(cu_min) and np.isnan(sk_min))
        assert cu_max == sk_max or (cp.isnan(cu_max) and np.isnan(sk_max))

    with pytest.raises(Exception):
        cu_min_max_axis(X, axis=axis, ignore_nan=ignore_nan)

def __lioness_loop(self):
    """
    Description:
        Initialize an instance of the Lioness class and load data.

    Outputs:
        self.total_lioness_network: An edge-by-sample matrix containing
        sample-specific networks.
    """
    for i in self.indexes:
        print("Running LIONESS for sample %d:" % (i + 1))
        idx = [x for x in range(self.n_conditions) if x != i]  # all samples except i

        with Timer("Computing coexpression network:"):
            if self.computing == 'gpu':
                import cupy as cp
                correlation_matrix = cp.corrcoef(self.expression_matrix[:, idx])
                if cp.isnan(correlation_matrix).any():
                    cp.fill_diagonal(correlation_matrix, 1)
                    correlation_matrix = cp.nan_to_num(correlation_matrix)
                correlation_matrix = cp.asnumpy(correlation_matrix)
            else:
                correlation_matrix = np.corrcoef(self.expression_matrix[:, idx])
                if np.isnan(correlation_matrix).any():
                    np.fill_diagonal(correlation_matrix, 1)
                    correlation_matrix = np.nan_to_num(correlation_matrix)

        with Timer("Normalizing networks:"):
            correlation_matrix_orig = correlation_matrix  # save matrix before normalization
            correlation_matrix = self._normalize_network(correlation_matrix)

        with Timer("Inferring LIONESS network:"):
            if self.motif_matrix is not None:
                del correlation_matrix_orig
                subset_panda_network = self.panda_loop(
                    correlation_matrix, np.copy(self.motif_matrix),
                    np.copy(self.ppi_matrix), self.computing)
            else:
                del correlation_matrix
                subset_panda_network = correlation_matrix_orig

        lioness_network = self.n_conditions * (self.network - subset_panda_network) + subset_panda_network

        with Timer("Saving LIONESS network %d to %s using %s format:" % (i + 1, self.save_dir, self.save_fmt)):
            path = os.path.join(self.save_dir, "lioness.%d.%s" % (i + 1, self.save_fmt))
            if self.save_fmt == 'txt':
                np.savetxt(path, lioness_network)
            elif self.save_fmt == 'npy':
                np.save(path, lioness_network)
            elif self.save_fmt == 'mat':
                from scipy.io import savemat
                savemat(path, {'PredNet': lioness_network})
            else:
                print("Unknown format %s! Using npy format instead." % self.save_fmt)
                np.save(path, lioness_network)

        # Flatten the transposed network into a column and stack the columns
        # sample by sample (np.frombuffer/tobytes replace the deprecated
        # np.fromstring/tostring pair)
        flat_network = np.frombuffer(
            np.transpose(lioness_network).tobytes(),
            dtype=lioness_network.dtype)
        if i == 0:
            self.total_lioness_network = flat_network
        else:
            self.total_lioness_network = np.column_stack(
                (self.total_lioness_network, flat_network))

    return self.total_lioness_network

def isnan(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.isnan <numpy.isnan>`.

    See its docstring for more information.
    """
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in isnan")
    return Array._new(np.isnan(x._array))

def _cholesky(B):
    """
    Wrapper around `cupy.linalg.cholesky` that raises LinAlgError if there
    are NaNs in the output
    """
    R = cupy.linalg.cholesky(B)
    if cupy.any(cupy.isnan(R)):
        raise numpy.linalg.LinAlgError
    return R

def main():
    '''CLI entry point. Also shows usage pattern'''
    parser = argparse.ArgumentParser(description='CC vs radius/q calculator')
    parser.add_argument('volume1', help='Path to first volume ccp4 map')
    parser.add_argument('volume2', help='Path to second volume ccp4 map')
    parser.add_argument('-r', '--res_edge',
                        help='Resolution at center-edge in A',
                        type=float, default=-1.)
    parser.add_argument('-b', '--bin_size',
                        help='Radial bin size in voxels. Default: 1',
                        type=int, default=1)
    parser.add_argument('-o', '--out_fname',
                        help='Path to output file. Default: cc.dat',
                        default='cc.dat')
    args = parser.parse_args()

    with h5py.File(args.volume1, 'r') as fptr:
        vol1 = np.array(fptr['diff_intens'][:])
    with h5py.File(args.volume2, 'r') as fptr:
        vol2 = np.array(fptr['diff_intens'][:])
    mask = ~(np.isnan(vol1) | np.isnan(vol2))
    assert vol1.shape == vol2.shape
    size = vol1.shape[-1]

    rbin = calc_rad(size, args.bin_size)
    num_bins = int(rbin.max() + 1)

    subtract_radavg(vol1, rbin, num_bins, mask)
    subtract_radavg(vol2, rbin, num_bins, mask)

    cc = calc_cc(vol1, vol2, rbin, num_bins, mask)

    if args.res_edge > 0.:
        cen = size // 2
        q = np.arange(num_bins) * args.bin_size / cen / args.res_edge
    else:
        q = None
    save_to_file(args.out_fname, cc, q)

def _masked_column_mean(arr, masked_value):
    """Compute the mean of each column in the 2D array arr, ignoring any
    instances of masked_value"""
    mask = _get_mask(arr, masked_value)
    count_missing_values = mask.sum(axis=0)
    n_elems = arr.shape[0] - count_missing_values
    mean = cp.nansum(arr, axis=0)
    if not cp.isnan(masked_value):
        # Masked entries were included in nansum; subtract their total
        mean -= (count_missing_values * masked_value)
    mean /= n_elems
    return mean

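# Hedged usage sketch mirroring the median helper above (not from the
# original source): with 0.0 as the masked value, the correction term
# removes the masked zeros before dividing by the unmasked count.
def _demo_masked_column_mean():
    demo = cp.asarray([[1.0, 2.0], [0.0, 4.0], [5.0, 6.0]])
    # Column 0 ignores its masked 0 -> mean of [1, 5] is 3; column 1 -> 4
    assert cp.allclose(_masked_column_mean(demo, 0.0),
                       cp.asarray([3.0, 4.0]))
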
def step(self, model, step_num):
    # expected signal
    self.y_expected = signal.get_signal_gpu(model.solution,
                                            model.detector_geometry)
    # multiplication
    self.mult = cp.divide(self.w_det, self.y_expected)
    self.mult = cp.where(cp.isnan(self.mult), 0, self.mult)
    self.mult = cp.sum(self.mult, axis=-1)
    self.mult /= self.wi
    # find delta
    model.solution = model.solution * self.mult

def _zdivide(x, y):
    """Patched version of :func:`sporco.linalg.zdivide`."""
    div = x / y
    div[cp.logical_or(cp.isnan(div), cp.isinf(div))] = 0
    return div

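# Hedged check (not from the original source): 0/0 and 1/0 would produce
# nan and inf; the patched divide maps both to 0.
def _demo_zdivide():
    num = cp.asarray([0.0, 1.0, 2.0])
    den = cp.asarray([0.0, 0.0, 2.0])
    assert cp.allclose(_zdivide(num, den), cp.asarray([0.0, 0.0, 1.0]))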