def corr2cova(corr: _np.ndarray) -> _np.ndarray:
    """Converts sinex CORR matrix to COVA using the diagonal STD values"""
    D = corr.diagonal() * corr.diagonal()[:, None]
    _np.fill_diagonal(corr, 1)  # fill COVA diagonal with 1 so we only multiply with D to get COVA
    return corr * D
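# Usage sketch (values assumed, not from the source). Note corr2cova mutates
# its argument via fill_diagonal, so pass a copy if CORR is still needed.
import numpy as _np

corr = _np.array([[0.5, 0.3],
                  [0.3, 2.0]])  # STDs 0.5 and 2.0 on the diagonal
cova = corr2cova(corr.copy())
# diagonal -> std**2 = [0.25, 4.0]; off-diagonal -> 0.3 * 0.5 * 2.0 = 0.3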
def quantum_contrast(matrix: np.ndarray) -> float:
    """Calculate the quantum contrast from the diagonal of a coincidence matrix.

    The quantum contrast is the average of the diagonal divided by the average
    of the off-diagonal elements. The input should be a coincidence matrix.
    """
    off_diag_avg = (matrix.sum() - matrix.diagonal().sum()) / (
        matrix.shape[0] * (matrix.shape[1] - 1))
    return np.average(matrix.diagonal()) / off_diag_avg
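# Worked sketch (assumed counts): a 3x3 coincidence matrix with strong
# diagonal counts; diagonal average 85, off-diagonal average 5, contrast 17.
m = np.array([[90, 5, 5],
              [5, 80, 5],
              [5, 5, 85]])
print(quantum_contrast(m))  # 17.0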
def _approx_permutation_2sided_1trans_normal1(a: np.ndarray) -> np.ndarray:
    # This assumes that array_a has all positive entries; this guess does not match that found
    # in the notes/paper because it doesn't include the sign function.
    # build the empty target array
    array_c = np.zeros(a.shape)
    # fill the first row of array_c with the diagonal entries
    array_c[0, :] = a.diagonal()
    array_mask = ~np.eye(a.shape[0], dtype=bool)
    # get all the non-diagonal elements
    array_c_non_diag = (a[array_mask]).T.reshape(a.shape[0], a.shape[1] - 1)
    array_c_non_diag = array_c_non_diag[
        np.arange(np.shape(array_c_non_diag)[0])[:, np.newaxis],
        np.argsort(abs(array_c_non_diag))
    ]
    # form the right format in order to combine with matrix A
    array_c_sorted = np.fliplr(array_c_non_diag).T
    # fill array_c with array_c_sorted
    array_c[1:, :] = array_c_sorted
    # the weight matrix
    weight_c = np.zeros(a.shape)
    weight_p = np.power(2, -0.5)
    for weight in range(a.shape[0]):
        weight_c[weight, :] = np.power(weight_p, weight)
    # build the new matrix array_new
    array_new = np.multiply(array_c, weight_c)
    return array_new
def get_init_mat(P_1_0: np.ndarray) \
        -> Tuple[int, np.ndarray, np.ndarray, np.ndarray]:
    """
    Get information for diffuse initialization.

    Parameters
    ----------
    P_1_0 : initial state covariance; a diagonal value of np.nan marks a
        diffuse state

    Returns
    -------
    number_diffuse : number of diffuse states
    A : selection matrix for diffuse states, equal to P_inf
    Pi : selection matrix for stationary states
    P_star : non-diffuse part of P_1_0
    """
    is_diffuse = np.isnan(P_1_0.diagonal())
    number_diffuse = np.count_nonzero(is_diffuse)
    A = np.diag(is_diffuse.astype(float))
    Pi = np.diag((~is_diffuse).astype(float))
    P_clean = P_1_0.copy()
    P_clean[np.isnan(P_clean)] = 0
    P_star = Pi.dot(P_clean).dot(Pi.T)
    return number_diffuse, A, Pi, P_star
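# Quick sketch (assumed input): a 3-state model whose first state is diffuse,
# marked by np.nan on the diagonal of P_1_0.
P_1_0 = np.diag([np.nan, 2.0, 3.0])
number_diffuse, A, Pi, P_star = get_init_mat(P_1_0)
# number_diffuse == 1; A selects state 0, Pi selects states 1 and 2, and
# P_star = diag([0, 2, 3]) keeps only the stationary block.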
def plot_var(fund_num: int, cov_mat: np.ndarray, highlight: list) -> None:
    var = cov_mat.diagonal()
    np.savetxt(os.path.join(os.getcwd(), 'tmp/group_var.csv'), var)
    green_x = list()
    for i in range(fund_num):
        if i + 1 not in highlight:
            green_x.append(i + 1)
    green_y = list()
    for i in green_x:
        green_y.append(var[i - 1])
    red_x = highlight
    red_y = list()
    for i in red_x:
        red_y.append(var[i - 1])
    f = plt.figure()
    matplotlib.rc('xtick', labelsize=8)
    matplotlib.rc('ytick', labelsize=10)
    plt.bar(green_x, green_y, tick_label=green_x)
    plt.bar(red_x, red_y, tick_label=red_x)
    plt.xticks(rotation=270)
    plt.xlabel('group')
    plt.ylabel('variance')
    plt.title("variance of each group")
    f.savefig(os.path.join(os.getcwd(), 'figs/group_var.pdf'))
def cov_to_std(cov: np.ndarray) -> np.ndarray:
    '''Compute standard deviations from a covariance matrix.'''
    diag = cov.diagonal()
    std = np.sqrt(diag)
    return std
def extendOneNode(X: np.ndarray, Y: np.ndarray):
    # print(X, Y)
    n = X.shape[0]
    xLabelNodes = X.diagonal()  # np.unique(X.diagonal())
    extensions = []
    for i, lNode in enumerate(xLabelNodes):
        # print("node", lNode, "i", i)
        indices = np.where(Y.diagonal() == lNode)[0]
        for iY in indices:
            for j in np.where(Y[iY] > 0)[0]:
                if j != iY:
                    pad = np.zeros((1, n + 1), dtype=int)
                    pad[0, -1] = Y[j, j]
                    pad[0, i] = Y[iY, j]
                    extensions.append(extend(X.copy(), pad))
    return extensions
def visibility(matrix: np.ndarray) -> float:
    """Calculate the visibility of the state.

    The visibility is the sum of the diagonal divided by the sum over the
    whole coincidence matrix. The input should be a coincidence matrix.
    """
    return matrix.diagonal().sum() / matrix.sum()
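# Worked sketch (assumed counts, same style as the quantum_contrast example):
# diagonal sum 255, total sum 285, so visibility = 255 / 285 ≈ 0.895.
m = np.array([[90, 5, 5],
              [5, 80, 5],
              [5, 5, 85]])
print(visibility(m))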
def diagonals(arr: np.ndarray) -> Lines:
    # Recursively collect the main diagonal and the anti-diagonal into a list.
    diags = []
    if arr.ndim == 1:
        diags.append(arr)
    else:
        diags.extend(diagonals(arr.diagonal()))
        diags.extend(diagonals(np.flip(arr, 0).diagonal()))
    return diags
def evaluate(self, state: np.ndarray):
    tic = time()
    situations = self.situations
    to_str = lambda array: ''.join(map(str, array))
    size = len(state)
    # the opponent state: 1 --> 2, 2 --> 1
    opponent_state = state.copy()
    opponent_state[opponent_state == 1] = 3
    opponent_state[opponent_state == 2] = 1
    opponent_state[opponent_state == 3] = 2
    opponent_reverse_state = opponent_state[::-1]
    my_record = defaultdict(int)
    opponent_record = defaultdict(int)
    reverse_state = state[::-1]
    my_horizontal, my_vertical = [to_str(x) for x in state], [
        to_str(state[..., x]) for x in range(size)
    ]
    my_lr_diag, my_rl_diag = [], []
    op_horizontal, op_vertical = [to_str(x) for x in opponent_state], [
        to_str(opponent_state[..., x]) for x in range(size)
    ]
    op_lr_diag, op_rl_diag = [], []
    for i in range(-size, size):
        my_lr_diag.append(to_str(state.diagonal(i)))
        my_rl_diag.append(to_str(reverse_state.diagonal(i)))
        op_lr_diag.append(to_str(opponent_state.diagonal(i)))
        op_rl_diag.append(to_str(opponent_reverse_state.diagonal(i)))
    # pprint(my_horizontal)
    # pprint(my_vertical)
    # pprint(my_lr_diag)
    # pprint(my_rl_diag)
    my_record = self._check_lines(my_record, *my_horizontal, *my_vertical,
                                  *my_lr_diag, *my_rl_diag)
    opponent_record = self._check_lines(opponent_record, *op_horizontal,
                                        *op_vertical, *op_lr_diag, *op_rl_diag)
    my_mark = sum(
        map(lambda situ: situations[situ] * my_record[situ], my_record.keys()))
    opponent_mark = sum(
        map(lambda situ: situations[situ] * opponent_record[situ],
            opponent_record.keys()))
    self.evaluate_time += (time() - tic)
    self.evaluate_num += 1
    # print('evaluate:', self.evaluate_num)
    return (my_mark - 1.1 * opponent_mark) if self.color == 1 else (
        opponent_mark - 1.1 * my_mark)
def recall(matrix: np.ndarray) -> np.ndarray:
    """
    Calculate recall of prediction.

    :param matrix: Confusion matrix
    :return: Recall for every class
    """
    # Note: with column sums in the denominator, this computes TP / (TP + FN)
    # only if columns hold the true labels.
    return np.divide(matrix.diagonal(), matrix.sum(axis=0))
def precision(matrix: np.ndarray) -> np.ndarray:
    """
    Calculate precision of prediction.

    :param matrix: Confusion matrix
    :return: Precision for every class
    """
    # Note: with row sums in the denominator, this computes TP / (TP + FP)
    # only if rows hold the predicted labels.
    return np.divide(matrix.diagonal(), matrix.sum(axis=1))
def accuracy(matrix: np.ndarray) -> float:
    """
    Calculate accuracy of prediction.

    :param matrix: Confusion matrix calculated
    :return: Accuracy
    """
    return matrix.diagonal().sum() / matrix.sum()
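# Worked sketch (assumed counts): a 2-class confusion matrix laid out to match
# the axis conventions of recall() and precision() above, i.e. rows are
# predicted labels and columns are true labels.
cm = np.array([[8, 2],
               [1, 9]])
print(recall(cm))     # TP / column totals = [8/9, 9/11]
print(precision(cm))  # TP / row totals    = [0.8, 0.9]
print(accuracy(cm))   # 17 / 20 = 0.85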
def non_diag(a: np.ndarray):
    """
    Remove the diagonal entries from a matrix.

    :param a: 2D numpy array to operate on
    :return: A copy of the matrix with diagonal elements zeroed out
    """
    return a - np.diagflat(a.diagonal())
def gaussian_predictions(
    model: GPModel,
    x_test: torch.Tensor,
    expected_mu: np.ndarray,
    expected_s: np.ndarray,
):
    """
    Every GP model with a Gaussian likelihood needs the same set of tests run
    on its ._predict() method.
    """
    # Predictions without full covariance
    mu_diag, s_diag = model._predict(x_test, diag=True)
    assert isinstance(mu_diag, torch.Tensor)
    assert isinstance(s_diag, torch.Tensor)
    assert mu_diag.shape[0] == x_test.shape[0]
    assert mu_diag.shape[1] == model.Y.shape[1]
    assert all([ss == ms for ss, ms in zip(mu_diag.shape, s_diag.shape)])
    assert all(
        [
            a == pytest.approx(e)
            for a, e in zip(mu_diag.detach().numpy().flatten(), expected_mu.flatten())
        ]
    )
    assert all(
        [
            a == pytest.approx(e)
            for a, e in zip(
                s_diag.detach().numpy().flatten(), expected_s.diagonal().flatten()
            )
        ]
    )
    # Predictions with full covariance
    mu_full, s_full = model._predict(x_test, diag=False)
    assert isinstance(mu_full, torch.Tensor)
    assert isinstance(s_full, torch.Tensor)
    assert mu_full.shape[0] == x_test.shape[0]
    assert mu_full.shape[1] == model.Y.shape[1]
    assert all([ss == x_test.shape[0] for ss in s_full.shape])
    assert all(
        [
            a == pytest.approx(e)
            for a, e in zip(mu_full.detach().numpy().flatten(), expected_mu.flatten())
        ]
    )
    assert all(
        [
            a == pytest.approx(e)
            for a, e in zip(s_full.detach().numpy().flatten(), expected_s.flatten())
        ]
    )
def diagonal(m: np.ndarray) -> np.ndarray:
    """
    Get the diagonal of the `feature x feature` matrix for each output.

    :param m: array of shape `(n_outputs, n_features, n_features)`
    :return: array of shape `(n_outputs, n_features)`, with the diagonals of arg ``m``
    """
    assert m.ndim == 3
    assert m.shape[1] == m.shape[2]
    return m.diagonal(axis1=1, axis2=2)
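# Tiny sketch: two outputs with a 3x3 feature-by-feature matrix each.
m = np.arange(18).reshape(2, 3, 3)
print(diagonal(m))  # [[ 0  4  8], [ 9 13 17]] -- one row of diagonals per output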
def recall_per_class(matrix: np.ndarray) -> np.ndarray:
    """
    Compute the recall per class based on the confusion matrix.

    :param matrix: the computed confusion matrix
    :return: the recall (per class), defined as TP/(TP+FN)
    """
    tp_per_class = matrix.diagonal()
    sum_tp_fn_per_class = matrix.sum(1)
    return divide_arrays_with_possible_zeros(tp_per_class, sum_tp_fn_per_class)
def cm_to_accuracies(cm: np.ndarray) -> np.ndarray:
    """
    Convert a confusion matrix to an array with the accuracy for each class.

    :param cm: the confusion matrix.
    :return: Accuracy for each class
    """
    # Divide each confusion-matrix row by its sum.
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # Return the diagonal, which is the accuracy of each class.
    return cm.diagonal()
def apply_along_diags(func: Callable, mat: np.ndarray,
                      offsets: Iterable,
                      filter_fn: Callable = None) -> Generator:
    """Apply a function to a certain set of diagonals.

    :param func: Callable. Function applied to each diagonal.
    :param mat: np.ndarray. 2d ndarray.
    :param offsets: list. List of diagonal offsets.
    :param filter_fn: Callable. Function applied to each diagonal, should return a mask.
    :return: Generator. Yielding the result of applying func to each diagonal.
    """
    max_len = mat.shape[0]
    offsets = tuple(offsets)
    if filter_fn is None:
        for offset in offsets:
            if offset >= max_len:
                break
            diag = mat.diagonal(offset)
            yield func(diag)
    else:
        diag = mat.diagonal(offsets[len(offsets) - 1])
        res = func(diag)
        if isinstance(res, np.ndarray):
            for offset in offsets:
                if offset >= max_len:
                    break
                diag = mat.diagonal(offset)
                mask = filter_fn(diag)
                zeros = np.zeros_like(diag)
                zeros[mask] = func(diag[mask])
                yield zeros, mask
        else:
            for offset in offsets:
                if offset >= max_len:
                    break
                diag = mat.diagonal(offset)
                yield func(diag[filter_fn(diag)])
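# Usage sketch (assumed data): the mean of the diagonals at offsets 0..2.
mat = np.arange(16, dtype=float).reshape(4, 4)
print(list(apply_along_diags(np.mean, mat, offsets=range(3))))
# [7.5, 6.0, 4.5]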
def get_inductances(L: np.ndarray, length: float = 1.0):
    '''Returns Le and Ke matrices of inductor values and coupling coefficients.

    Input is the per-unit-length inductance matrix, size N x N.
    Le are the individual inductor values, size N.
    Ke are the coupling coefficients, size N x N.'''
    assert L.ndim == 2
    Le = L.diagonal() * length
    N = L.shape[0]
    Ke = np.zeros_like(L)
    for i in range(N):
        for j in range(N):
            Ke[i, j] = L[i, j] / np.sqrt(L[i, i] * L[j, j])
    return Le, Ke
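# Sketch with assumed values: two coupled lines with self inductance 1e-6 H/m
# and mutual inductance 0.2e-6 H/m over 0.5 m of line.
L = np.array([[1.0e-6, 0.2e-6],
              [0.2e-6, 1.0e-6]])
Le, Ke = get_inductances(L, length=0.5)
# Le = [5e-07, 5e-07]; Ke has ones on the diagonal and k = 0.2 off it.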
def canonicalForm(graph: np.ndarray, embeddings=None):
    labelNodes = graph.diagonal()
    start = np.zeros((1, 1), dtype=int)
    maxNodes = np.where(labelNodes == np.max(labelNodes))[0]
    start[0, 0] = np.max(labelNodes)
    canonical = {
        "code": ''
    }
    for idStart in maxNodes:
        S = {
            "tree": start,
            "index": np.array([idStart]),
            "code": encodeGraph(start)
        }
        while len(S["index"]) < len(labelNodes):
            # trees = []
            newCandidates = {}
            for i in range(graph.shape[0]):
                if i in S["index"]:
                    continue
                Q = []
                t = S["tree"]
                for id, j in enumerate(S["index"]):
                    if graph[i, j] == 0:
                        continue
                    rowExpand = np.zeros((1, t.shape[0]), dtype=int)
                    rowExpand[0, id] = graph[i, j]
                    tree = np.r_[t, rowExpand]
                    colExpand = np.zeros((tree.shape[0], 1), dtype=int)
                    colExpand[id, 0] = graph[i, j]
                    colExpand[tree.shape[0] - 1, 0] = graph[i, i]
                    tree = np.c_[tree, colExpand]
                    indexTree = np.concatenate([S["index"], np.array([i])])
                    codeTree = encodeGraph(tree)
                    newCandidates[codeTree] = {
                        "tree": tree,
                        "index": indexTree,
                        "code": codeTree
                    }
            S = newCandidates[max(newCandidates.keys())]
        canonical = S if canonical["code"] < S["code"] else canonical
    if embeddings is not None:
        for k in embeddings.keys():
            topo = []
            for subNodes in embeddings[k]:
                reindexedNodes = np.array([subNodes[idNode] for idNode in canonical['index']])
                topo.append(reindexedNodes)
            embeddings[k] = topo
    return canonical
def solve_linear_system_sparse(
        A: np.ndarray,
        u: np.ndarray
) -> np.ndarray:
    '''
    Solves a linear system of the form Ax = u, where A is a sparse symmetric
    tridiagonal matrix, returning the vector x.

    @parameters:
    - A: dim = ((N-1)x(N-1)), square input matrix of the system
    - u: dim = ((N-1)x1), right-hand-side vector of the equation

    @output:
    - x: dim = ((N-1)x1), vector of unknowns
    '''
    # A.x = L.D.Lt.x
    a = np.asarray(A.diagonal(0)).ravel()
    N = a.shape[0] + 1
    b = np.concatenate((np.array([0]), np.asarray(A.diagonal(1)).ravel()))
    d, l = perform_ldlt_transformation_sparse(a, b)
    # As explained in the report, we solve the problem with three loops:
    # first we solve for z, then for y, and only then for x.
    # Loop for z:
    z = np.zeros(N - 1)
    z[0] = u[0]
    for i in range(1, N - 1):
        z[i] = u[i] - z[i - 1] * l[i]
    # Loop for y:
    y = np.zeros(N - 1)
    for i in range(N - 1):
        y[i] = z[i] / d[i]
    # Loop for x:
    x = np.zeros(N - 1)
    x[-1] = y[-1]
    for i in range(N - 3, -1, -1):
        x[i] = y[i] - x[i + 1] * l[i + 1]
    return x
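# The solver above relies on perform_ldlt_transformation_sparse, which is not
# shown. A minimal sketch of such a helper (an assumption, not the original):
# LDLt factorization of a symmetric tridiagonal matrix with main diagonal `a`
# and off-diagonal `b`, where b[0] is the zero placeholder built by the solver.
def perform_ldlt_transformation_sparse(a: np.ndarray, b: np.ndarray):
    n = a.shape[0]
    d = np.zeros(n)  # diagonal of D
    l = np.zeros(n)  # subdiagonal of L; l[i] couples rows i-1 and i
    d[0] = a[0]
    for i in range(1, n):
        l[i] = b[i] / d[i - 1]
        d[i] = a[i] - l[i] ** 2 * d[i - 1]
    return d, l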
def search(mat: np.ndarray, row: int) -> Generator[np.ndarray, None, None]:
    assert row >= 0
    if row == N:
        yield mat
        return
    for col in [
        i for i in range(N)
        if np.all(mat[:row, i] == 0)  # column not occupied above this row
        and np.all(mat.diagonal(i - row) == 0)  # not occupied on the main diagonal
        and np.all(np.fliplr(mat).diagonal(N - 1 - i - row) == 0)  # not occupied on the opposite diagonal
    ]:
        mat[row, col] = 1
        yield from search(mat, row + 1)
        mat[row, col] = 0
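# Usage sketch, assuming the module-level board size N that search() reads.
# The generator yields the same mutable board object, so count (or copy) the
# solutions rather than storing the yielded references.
N = 6
n_solutions = sum(1 for _ in search(np.zeros((N, N), dtype=int), 0))
print(n_solutions)  # the 6-queens puzzle has 4 solutions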
def terminal_test_great(self, state: np.ndarray, action):
    continuity_test = lambda string: '11111' in string or '22222' in string
    x, y = action
    board_size = len(state)
    horizontal = ''.join(map(str, state[x, max(0, y - 4): min(board_size, y + 5)]))
    vertical = ''.join(map(str, state[max(0, x - 4): min(board_size, x + 5), y]))
    lr_diag = ''.join(map(str, state.diagonal(y - x)))
    # 14 == board_size - 1 here: the anti-diagonal offset assumes a 15x15 board
    rl_diag = ''.join(map(str, state[:, ::-1].diagonal(14 - x - y)))
    if continuity_test(horizontal) or continuity_test(vertical) \
            or continuity_test(lr_diag) or continuity_test(rl_diag):
        return True
    return False
def precision_per_class(matrix: np.ndarray) -> np.ndarray:
    """
    Compute the precision per class based on the confusion matrix.

    :param matrix: the computed confusion matrix
    :return: the precision (per class), defined as TP/(TP+FP)
    """
    if not matrix.shape[0] == matrix.shape[1]:
        # If the matrix is not square (there is a column for the "other" label),
        # the "other" column is deleted. Otherwise there would be 3 elements in
        # TP and 4 in TP+FP, meaning they can't be divided.
        matrix = np.delete(matrix, -1, 1)
    tp_per_class = matrix.diagonal()
    sum_tp_fp_per_class = matrix.sum(0)
    return divide_arrays_with_possible_zeros(tp_per_class, sum_tp_fp_per_class)
def evaluate(self, state: np.ndarray):
    tic = time()
    situations = self.situations
    to_str = lambda array: ''.join(map(str, array))
    # the opponent state: 1 --> 2, 2 --> 1
    opponent_state = state.copy()
    opponent_state[opponent_state == 1] = 3
    opponent_state[opponent_state == 2] = 1
    opponent_state[opponent_state == 3] = 2
    # print(state)
    # print(opponent_state)
    my_record = defaultdict(int)
    opponent_record = defaultdict(int)
    reverse_state = state[::-1]
    opponent_reverse_state = opponent_state[::-1]
    for i in range(len(state)):
        horizontal = to_str(state[i])
        vertical = to_str(state[..., i])
        lr_diag = to_str(state.diagonal(i))
        rl_diag = to_str(reverse_state.diagonal(-i))
        my_record = self._check_lines(my_record, horizontal, vertical,
                                      lr_diag, rl_diag)
        horizontal = to_str(opponent_state[i])
        vertical = to_str(opponent_state[..., i])
        lr_diag = to_str(opponent_state.diagonal(i))
        # use the flipped opponent board here, mirroring reverse_state above
        rl_diag = to_str(opponent_reverse_state.diagonal(-i))
        opponent_record = self._check_lines(opponent_record, horizontal,
                                            vertical, lr_diag, rl_diag)
    my_mark = sum(
        map(lambda situ: situations[situ] * my_record[situ], my_record.keys()))
    opponent_mark = sum(
        map(lambda situ: situations[situ] * opponent_record[situ],
            opponent_record.keys()))
    self.evaluate_time += (time() - tic)
    self.evaluate_num += 1
    return (my_mark - opponent_mark) if self.color == 1 else (opponent_mark - my_mark)
def extendFFSM(self, X: np.ndarray, Y: np.ndarray):
    pos = np.where(Y.diagonal() == X[-1, -1])[0]
    n = X.shape[0]
    extensions = []
    for p in pos:
        if p < n - 1:
            indices = np.where(Y[p, p + 1:] > 0)[0]
            # indices = np.where(Y[p] > 0)[0]
            for i in indices:
                # if i != p:
                pad = np.zeros((1, n + 1), dtype=int)
                # pad[0, -1] = Y[i, i]
                # pad[0, -2] = Y[p, i]
                pad[0, -1] = Y[p + i + 1, p + i + 1]
                pad[0, -2] = Y[p, p + i + 1]
                extensions.append(self.extend(X, pad))
    return extensions
def logit_tpm(tpm: _np.ndarray) -> _np.ndarray:
    """Transform tpm to logit space for unconstrained optimization.

    Note: There must be no zeros on the main diagonal.

    Args:
        tpm (np.ndarray): Transition probability matrix.

    Returns:
        (np.ndarray) lg_tpm of shape (1, m**2-m).
    """
    assert_st_matrix(tpm)
    logits = _np.log(tpm / tpm.diagonal()[:, None])
    lg_tpm = get_off_diag(logits)
    return lg_tpm
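# Quick sketch (assert_st_matrix and get_off_diag are module helpers assumed
# to exist): each row is divided by its diagonal entry before taking logs, so
# only the m**2 - m off-diagonal log-ratios remain as free parameters.
tpm = _np.array([[0.8, 0.2],
                 [0.3, 0.7]])
# logits = [[0, log(0.25)], [log(3/7), 0]]; logit_tpm(tpm) then returns the
# off-diagonal entries [log(0.25), log(3/7)].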
def diagonalWinCheck(board: np.ndarray) -> int:
    diags = [
        board[::-1, :].diagonal(i)
        for i in range(-board.shape[0] + 1, board.shape[1])
    ]
    diags.extend(
        board.diagonal(i)
        for i in range(board.shape[1] - 1, -board.shape[0], -1))
    for i in diags:
        if any(
                sum(1 for _ in islice(g, 4)) == 4
                for k, g in groupby(i) if k == 1):
            return 1
        elif any(
                sum(1 for _ in islice(g, 4)) == 4
                for k, g in groupby(i) if k == 2):
            return 2
    return 0
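# Small sketch (assumed board): four 1s on a rising diagonal of a 6x7 board.
board = np.zeros((6, 7), dtype=int)
for step in range(4):
    board[5 - step, step] = 1
print(diagonalWinCheck(board))  # 1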
def pauli_twirl_chi_matrix(chi_matrix: np.ndarray) -> np.ndarray:
    r"""
    Implements a Pauli twirl of a chi matrix (aka a process matrix).

    See the following reference for more details:

    [SPICC] Scalable protocol for identification of correctable codes
            Silva et al., PRA 78, 012347 2008
            http://dx.doi.org/10.1103/PhysRevA.78.012347
            https://arxiv.org/abs/0710.1900

    Note: Pauli twirling a quantum channel can give rise to a channel that is
    less noisy; use with care.

    :param chi_matrix: a dim**2 by dim**2 chi or process matrix
    :return: dim**2 by dim**2 chi or process matrix
    """
    return np.diag(chi_matrix.diagonal())
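# Minimal sketch: twirling keeps only the diagonal (Pauli error probabilities)
# and zeroes the off-diagonal coherence terms of the chi matrix.
chi = np.array([[0.9, 0.1],
                [0.1, 0.1]])
print(pauli_twirl_chi_matrix(chi))  # [[0.9, 0.0], [0.0, 0.1]]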
def _mutual_proximity_gaussi_sparse(
        S: np.ndarray,
        sample_size: int = 0,
        test_set_ind: np.ndarray = None,
        verbose: int = 0,
        log=None
):
    """MP gaussi for sparse similarity matrices.

    Please do not directly use this function, but invoke via
    mutual_proximity_gaussi()
    """
    n = S.shape[0]
    self_value = 1  # similarity matrix
    if test_set_ind is None:
        train_set_ind = slice(0, n)
    else:
        train_set_ind = np.setdiff1d(np.arange(n), test_set_ind)
    # ==========================================================================
    # from sklearn.utils.sparsefuncs_fast import csr_mean_variance_axis0  # @UnresolvedImport
    # mu, var = csr_mean_variance_axis0(S[train_set_ind])
    # sd = np.sqrt(var)
    # del var
    # ==========================================================================
    # mean, variance WITHOUT zero values (missing values), ddof=0
    if S.diagonal().max() != 1.0 or S.diagonal().min() != 1.0:
        raise ValueError("Self similarities must be 1.")
    S_param = S[train_set_ind]
    # the -1 accounts for self similarities that must be excluded from the calc
    mu = np.array((S_param.sum(0) - 1.0) / (S_param.getnnz(0) - 1)).ravel()
    X = S_param
    X.data **= 2
    E1 = np.array((X.sum(0) - 1.0) / (X.getnnz(0) - 1)).ravel()
    del X, S_param
    E2 = mu ** 2
    va = E1 - E2
    del E1, E2
    sd = np.sqrt(va)
    del va
    S_mp = lil_matrix(S.shape)
    for i in range(n):
        if verbose and log and ((i + 1) % 1000 == 0 or i + 1 == n):
            log.message("MP_gaussi: {} of {}.".format(i + 1, n), flush=True)
        j_idx = slice(i + 1, n)
        S_ij = S[i, j_idx].toarray().ravel()  # Extract dense rows temporarily
        S_ji = S[j_idx, i].toarray().ravel()  # for vectorization below.
        p1 = norm.cdf(S_ij, mu[i], sd[i])  # mu, sd broadcasted
        p1[S_ij == 0] = 0
        del S_ij
        p2 = norm.cdf(S_ji, mu[j_idx], sd[j_idx])
        p2[S_ji == 0] = 0
        del S_ji
        tmp = np.empty(n - i)
        tmp[0] = self_value / 2.0
        tmp[1:] = (p1 * p2).ravel()
        S_mp[i, i:] = tmp
        del tmp, j_idx
    S_mp += S_mp.T
    return S_mp.tocsr()
def _mutual_proximity_gammai_sparse(S: np.ndarray,
                                    test_set_ind: np.ndarray = None,
                                    verbose: int = 0,
                                    log=None):
    """MP gammai for sparse similarity matrices.

    Please do not directly use this function, but invoke via
    mutual_proximity_gammai()
    """
    n = S.shape[0]
    self_value = 1.0
    if test_set_ind is None:
        train_set_ind = slice(0, n)
    else:
        train_set_ind = np.setdiff1d(np.arange(n), test_set_ind)
    # mean, variance WITH zero values
    # =======================================================================
    # from sklearn.utils.sparsefuncs_fast import csr_mean_variance_axis0
    # mu, va = csr_mean_variance_axis0(self.S[train_set_mask])
    # =======================================================================
    # mean, variance WITHOUT zero values (missing values), ddof=1
    if S.diagonal().max() != 1.0 or S.diagonal().min() != 1.0:
        raise ValueError("Self similarities must be 1.")
    S_param = S[train_set_ind]
    # the -1 accounts for self similarities that must be excluded from the calc
    mu = np.array((S_param.sum(0) - 1) / (S_param.getnnz(0) - 1)).ravel()
    E2 = mu ** 2
    X = S_param.copy()
    X.data **= 2
    n_x = X.getnnz(0) - 1
    E1 = np.array((X.sum(0) - 1) / n_x).ravel()
    del X
    # for an unbiased sample variance
    va = n_x / (n_x - 1) * (E1 - E2)
    del E1
    A = E2 / va
    B = va / mu
    del mu, va, E2
    A[A < 0] = np.nan
    B[B <= 0] = np.nan
    S_mp = lil_matrix(S.shape, dtype=np.float32)
    for i in range(n):
        if verbose and log and ((i + 1) % 1000 == 0 or i + 1 == n):
            log.message("MP_gammai: {} of {}".format(i + 1, n), flush=True)
        j_idx = slice(i + 1, n)
        Dij = S[i, j_idx].toarray().ravel()  # Extract dense rows temporarily
        p1 = _local_gamcdf(Dij, A[i], B[i])
        del Dij
        Dji = S[j_idx, i].toarray().ravel()  # for vectorization below.
        p2 = _local_gamcdf(Dji, A[j_idx], B[j_idx])
        del Dji
        tmp = np.empty(n - i)
        tmp[0] = self_value / 2.0
        tmp[1:] = (p1 * p2).ravel()
        S_mp[i, i:] = tmp
        del tmp, j_idx
    S_mp += S_mp.T
    return S_mp.tocsr()