def _minimize_phi_row(
        phi_prev, psi_scalars, observed_phi_row, sigma_row, alphas, ns, m,
        row_idx=None, parallelized=False, shared_array_id=None):
    """Solve the per-row constrained weighted least-squares update for phi.

    Minimizes a two-term objective over a nonnegative phi row: a data-fit
    term against ``observed_phi_row`` (weighted by ``m / sigma_row``) plus a
    bulk-consistency term against ``psi_scalars / ns`` (weighted by the
    inverse mixture variance). Solver failures fall back to the previous
    (clipped) row rather than raising.

    Args:
        phi_prev: current estimate for this phi row.
        psi_scalars: observed bulk expression values for this row.
        observed_phi_row: directly observed phi values for this row.
        sigma_row: per-entry variance estimates for this row.
        alphas: mixture proportion matrix.
        ns: bulk sample size scaling.
        m: sample-count weight for the observed-phi term.
        row_idx: row position in the shared output array (parallel mode only).
        parallelized: when True (and ``shared_array_id`` is truthy), also
            write the result into a process-shared array.
        shared_array_id: key into the module-level ``shared_dict`` of shared
            ctypes arrays — NOTE(review): presumably set up by the parallel
            driver; verify against the caller.

    Returns:
        The updated, nonnegative phi row as a NumPy array.
    """
    phi_next = cp.Variable(phi_prev.shape)
    # Default result if the solver never produces a value.
    phi_final = np.copy(phi_prev)
    try:
        # Clip to avoid division by ~0 in the weight terms below.
        phi_prev = np.clip(phi_prev, CLIP_VALUE, None)
        sample_coef = m / np.clip(sigma_row, CLIP_VALUE, None)
        bulk_coef = ns / \
            np.clip(compute_mixture_sigma(
                alphas, sigma_row, phi_prev), CLIP_VALUE, None)
        prob = cp.Problem(
            cp.Minimize((observed_phi_row - phi_next)**2 @ sample_coef.T +
                        (phi_next @ alphas.T - psi_scalars / ns)**2
                        @ bulk_coef.T),
            [phi_next >= 0])
        prob.solve()
        # Numerical solvers can return tiny negatives; clamp to the feasible set.
        phi_final = np.clip(phi_next.value, 0, None)
    except (cp.SolverError, TypeError) as e:
        # Best-effort update: on solver failure we keep the previous row
        # (handled below via the phi_next.value is None check).
        pass
    if phi_next.value is None:
        phi_final = np.clip(phi_prev, 0, None)
    if parallelized and shared_array_id:
        # Publish the row into the shared-memory result matrix so the
        # parent process can collect all rows without pickling overhead.
        phi_tmp = np.ctypeslib.as_array(shared_dict[shared_array_id])
        phi_tmp[row_idx, :] = phi_final
    return phi_final
def _update_n(phi, alpha, psi, sigma):
    """Update the scalar n by solving its quadratic stationarity condition.

    Builds the quadratic ``a*n^2 + G*n - b = 0`` where ``a`` is the
    inverse-variance-weighted norm of the predicted bulk profile
    (``phi @ alpha.T``), ``b`` is the same weighted norm of the observed
    ``psi``, and ``G`` is the number of genes; returns the largest root.
    """
    clipped_variance = np.clip(
        compute_mixture_sigma(alpha, sigma, phi), CLIP_VALUE, None)
    weights = 1 / clipped_variance
    num_genes = psi.shape[0]
    quadratic_coef = compute_weighted_norm(phi @ alpha.T, weights)
    constant_coef = -compute_weighted_norm(psi, weights)
    candidate_roots = np.roots([quadratic_coef, num_genes, constant_coef])
    return np.max(candidate_roots)
def _compute_alpha_LS(alpha_hats, phi_hat, phi, sigma, psis):
    """Re-estimate each sample's mixture proportions by weighted least squares.

    For every row of ``alpha_hats``, whitens ``phi`` and the corresponding
    bulk column of ``psis`` by the inverse mixture standard deviation, solves
    the resulting least-squares system, clips negatives to zero, and
    renormalizes so each returned row sums to one.
    """
    alpha_LS = np.zeros(alpha_hats.shape)
    for idx in range(alpha_hats.shape[0]):
        variances = compute_mixture_sigma(
            alpha_hats[idx].reshape(1, -1), sigma, phi_hat)
        whitener = np.diag(1 / np.sqrt(variances).ravel())
        # rcond=-1 keeps the legacy lstsq cutoff and silences the
        # deprecation warning (as in the original).
        solution, *_ = np.linalg.lstsq(
            whitener @ phi,
            whitener @ psis[:, idx].reshape(-1, 1),
            rcond=-1)
        row = np.clip(solution.T, 0, None)
        alpha_LS[idx] = row / np.sum(row)
    return alpha_LS
def _partial_alpha_phi(phi, sigma, alpha, n):
    """Mixed second partial of the objective w.r.t. alpha and (flattened) phi.

    Returns a (K, G*K) matrix where row ``i`` indexes the alpha component and
    column ``j`` indexes the flattened phi entry ``(g, k) = (j // K, j % K)``.
    NOTE(review): assumes phi is (G, K) — genes by cell types — and alpha is
    a (1, K) row vector; confirm against the callers.
    """
    G, K = phi.shape
    mixture_variance = compute_mixture_sigma(alpha, sigma, phi)
    partial_mv_alpha = _partial_mixture_variance_alpha(phi, sigma, alpha)
    partial_mv_phi = _partial_mixture_variance_phi(phi, alpha)

    # Vectorized over the full (i, j) index grid below; each call computes
    # one entry of the mixed-partial matrix.
    @np.vectorize
    def partial_alpha_phi_helper(i, j):
        # Decode the flattened phi column index j into (gene g, cell type k).
        g, k = j // K, j % K
        # First term: interaction through the mean; second term: interaction
        # through the mixture variance (hence the squared variance in the
        # denominator).
        return n * alpha[0, k] * phi[g, i] / mixture_variance[g, 0] + \
            partial_mv_alpha[g, i] * partial_mv_phi[g, k] / \
            (2 * mixture_variance[g, 0]**2)

    # np.mgrid produces the (K, G*K) row/column index arrays in one shot.
    return partial_alpha_phi_helper(*np.mgrid[0:K, 0:G * K])
def _minimize_alpha(alpha_prev, sigma, phi, psi, n):
    """Solve the constrained weighted least-squares update for alpha.

    Minimizes ``coef.T @ (phi @ alpha.T - psi / n)**2`` over the probability
    simplex (alpha >= 0, sum(alpha) == 1), where ``coef`` is the inverse
    mixture variance scaled by ``n``. On solver failure the previous alpha
    is returned unchanged.

    Args:
        alpha_prev: current mixture-proportion estimate (used for weights
            and as the fallback).
        sigma: per-gene variance estimates.
        phi: expression profile matrix.
        psi: observed bulk expression vector.
        n: bulk sample size scaling.

    Returns:
        The updated alpha, clipped to nonnegative and renormalized to sum
        to one, or ``alpha_prev`` if the solve produced no value.
    """
    alpha_next = cp.Variable(alpha_prev.shape)
    alpha = np.copy(alpha_prev)
    try:
        # Clip to avoid division by ~0 in the weights.
        coef = n / np.clip(
            compute_mixture_sigma(alpha_prev, sigma, phi), CLIP_VALUE, None)
        # BUG FIX: the constraint list was previously passed as a second
        # positional argument to cp.Minimize(...), which raised TypeError
        # (silently swallowed below), so the solve never ran and the
        # function always returned alpha_prev. Constraints belong to
        # cp.Problem — matching how _minimize_phi_row builds its problem.
        prob = cp.Problem(
            cp.Minimize(coef.T @ (phi @ alpha_next.T - psi / n)**2),
            [alpha_next >= 0, cp.sum(alpha_next) == 1])
        prob.solve()
        # Clamp tiny solver negatives back into the feasible set.
        alpha = np.clip(alpha_next.value, 0, None)
    except (cp.SolverError, TypeError):
        # Best-effort update: fall through to the None check below.
        pass
    if alpha_next.value is None:
        return alpha_prev
    # Renormalize so the returned proportions sum to exactly one.
    return alpha / np.sum(alpha)
def _partial_phi_phi(phi, sigma, alpha, n):
    """Second partial of the objective w.r.t. the flattened phi matrix.

    Assembles a block-diagonal (G*K, G*K) matrix with one K-by-K block per
    gene — the cross-gene partials are zero — plus a diagonal ``m / sigma``
    contribution from the directly observed phi term.
    NOTE(review): assumes phi is (G, K); confirm against the callers.
    """
    G, K = phi.shape
    mixture_variance = compute_mixture_sigma(alpha, sigma, phi)
    # Mean-term curvature: n * outer(alpha, alpha) / variance, replicated
    # per gene along the third axis.
    alpha_term = np.repeat(
        n * np.kron(alpha, alpha).reshape(K, K)[:, :, np.newaxis], G,
        axis=2) / mixture_variance.reshape(1, 1, -1)
    partial_mv_phi = _partial_mixture_variance_phi(phi, alpha)
    # Variance-term curvature: outer product of the per-gene variance
    # gradients, stacked along the third axis to match alpha_term.
    phi_term = np.dstack([
        np.kron(partial_mv_phi[i], partial_mv_phi[i]).reshape(K, K) /
        (2 * mixture_variance[i]**2) for i in range(G)
    ])
    # Split the (K, K, G) stack into per-gene blocks, lay them out
    # block-diagonally, then add the diagonal observed-data contribution.
    return scipy.linalg.block_diag(
        * [np.squeeze(subarr)
           for subarr in np.dsplit(alpha_term + phi_term, G)]) + np.diag(
        (m / sigma).reshape(-1))
def cross_protocol_inverse_observed_fisher(phi, sigma, m, alpha, n, psi):
    """Finds an adjusted inverse observed fisher information matrix for
    cross protocol experiments.

    Takes in phi, sigma, psi matrices filtered by
    filter_droplet_to_facs/filter_facs_to_droplet.

    When there are 500 or more genes, subsamples the genes before inverting:
    the subsample size shrinks (down to a floor of 100) as the excess
    squared-z-score mass ``z_tail`` grows.
    """
    G = phi.shape[0]
    mixture_variance = compute_mixture_sigma(alpha, sigma, phi)
    # Standardized residuals of the predicted vs. observed bulk profile.
    z_scores = (phi @ alpha.T - psi / n) / np.sqrt(mixture_variance / n)
    # Excess over the expected sum of G squared standard normals.
    z_tail = (z_scores**2).sum() - G
    # BUG FIX: np.ceil returns a float, and np.random.choice requires an
    # integer sample size — the previous float value raised TypeError on
    # modern NumPy whenever G >= 500. Cast to int.
    filter_length = int(max(np.ceil(500 - 400 * max(z_tail, 0)), 100))
    filter_idxs = np.arange(G) if G < 500 else np.random.choice(
        G, min(filter_length, G), replace=False)
    # Variances are floored at 1 to keep the Fisher information well
    # conditioned on the subsampled genes.
    return inverse_observed_fisher(phi[filter_idxs],
                                   np.clip(sigma[filter_idxs], 1, None), m,
                                   alpha, n)
def _partial_n_phi(phi, sigma, alpha, n):
    """Mixed second partial of the objective w.r.t. n and (flattened) phi.

    Combines a mean term (Kronecker product of the variance-scaled predicted
    bulk profile with alpha) and a variance term, returned as a single
    (1, G*K) row vector.
    """
    variances = compute_mixture_sigma(alpha, sigma, phi)
    mv_grad_phi = _partial_mixture_variance_phi(phi, alpha)
    mean_term = np.kron((phi @ alpha.T) / variances, alpha)
    variance_term = mv_grad_phi / (2 * n * variances)
    combined = mean_term + variance_term
    return combined.reshape(1, -1)
def _partial_alpha_n(phi, sigma, alpha, n):
    """Mixed second partial of the objective w.r.t. alpha and n.

    Sum of a mean term (variance-weighted projection of the predicted bulk
    profile onto phi) and a variance term (column sums of the alpha-gradient
    of the mixture variance, scaled by 1/(2n)); shape (K, 1).
    """
    variances = compute_mixture_sigma(alpha, sigma, phi)
    mv_grad_alpha = _partial_mixture_variance_alpha(phi, sigma, alpha)
    mean_term = (phi / variances).T @ (phi @ alpha.T)
    variance_term = np.sum(
        mv_grad_alpha / variances, axis=0).reshape(-1, 1) / (2 * n)
    return mean_term + variance_term
def _partial_n_n(phi, sigma, alpha, n):
    """Second partial of the objective w.r.t. n (a scalar)."""
    variances = compute_mixture_sigma(alpha, sigma, phi)
    num_genes = phi.shape[0]
    predicted_bulk = phi @ alpha.T
    constant_term = (1 - num_genes / 2) / n**2
    data_term = np.sum(predicted_bulk**2 / variances) / n
    return constant_term + data_term
def _partial_alpha_alpha(phi, sigma, alpha, n):
    """Second partial of the objective w.r.t. alpha (a K-by-K matrix).

    Sum of a mean-curvature term (n-scaled inverse-variance-weighted Gram
    matrix of phi) and a variance-curvature term built from the
    alpha-gradient of the mixture variance.
    """
    variances = compute_mixture_sigma(alpha, sigma, phi)
    mv_grad_alpha = _partial_mixture_variance_alpha(phi, sigma, alpha)
    inv_var = np.diag(1 / variances.reshape(-1))
    inv_var_sq = np.diag(1 / variances.reshape(-1)**2)
    mean_term = n * phi.T @ inv_var @ phi
    variance_term = mv_grad_alpha.T @ inv_var_sq @ mv_grad_alpha / 2
    return mean_term + variance_term