def inner_prepare(array):
    """Strip NaNs from *array*, optionally bootstrap-resampling it first.

    When the enclosing scope's ``jacknife_size`` is truthy, a bootstrap
    sample of that size (drawn with replacement) is taken before NaN
    removal; otherwise the array is NaN-filtered as-is.

    NOTE(review): relies on ``jacknife_size`` and ``rm_nans`` from the
    enclosing scope — presumably a closure helper; confirm against caller.
    """
    source = array
    if jacknife_size:
        # Bootstrap resample with replacement at the requested size.
        source = np.random.choice(array, size=(jacknife_size,), replace=True)
    return rm_nans(source)
def get_t_distro_outlier_bound_estimation(array, background_std):
    """Return the half-width of a 95% t-confidence band around the mean.

    The scale combines the sample variance of the NaN-filtered data with
    ``background_std`` in quadrature. The larger of the upper/lower
    distances from the mean is returned (they coincide for the symmetric
    t interval, so this is effectively the one-sided bound).
    """
    clean = rm_nans(array)
    center = np.mean(clean)
    # Pool observed variance with the externally supplied background noise.
    spread = np.sqrt(np.var(clean) + background_std ** 2)
    lower, upper = t.interval(0.95, clean.shape[0] - 1, center, spread)
    return max(upper - center, center - lower)
def pull_breakpoints(contingency_list):
    """
    A method to extract breakpoints separating np.array regions with the same value.

    :param contingency_list: np.array containing regions of identical values
    :return: list of breakpoint indexes
    """
    no_nans_parsed = rm_nans(contingency_list)
    # True where a value equals its predecessor; pad a leading True so the
    # first element is never flagged as a breakpoint.
    contingency = np.pad(  # FIX: np.lib.pad is a legacy alias of np.pad
        no_nans_parsed[:-1] == no_nans_parsed[1:],
        (1, 0), "constant", constant_values=(True, True)
    )
    # Scatter the contingency flags back onto the original (NaN-bearing)
    # index space; NaN positions stay False and thus count as breakpoints.
    # FIX: np.bool was removed in NumPy 1.24 — use the builtin bool dtype.
    nans_contingency = np.zeros(contingency_list.shape, dtype=bool)
    nans_contingency[np.logical_not(np.isnan(contingency_list))] = contingency
    breakpoints = np.nonzero(np.logical_not(nans_contingency))[0].tolist()
    return breakpoints
def logistic_regression(TF, T0, concentrations, background_std):
    """Compute logistic growth exponents (alphas) between T0 and TF readouts.

    The carrying capacity is estimated from wells at zero concentration
    (upper edge of a 99% t-interval, inflated by 5%), then each readout is
    mapped through the inverse logistic transform ``-log2(K/x - 1)`` and the
    T0 baseline is subtracted.

    NOTE(review): assumes TF is indexable as [replicate, concentration, time]
    and T0 broadcasts against it — confirm against callers.
    """
    def upper_1p_bound(mean, std, dof):
        # Upper edge of the 99% confidence t-interval.
        return t.interval(0.99, dof, mean, std)[1]

    zero_conc_mask = concentrations == 0.0
    vals_at_0 = rm_nans(TF[:, zero_conc_mask, :])
    # Pool sample variance with background noise in quadrature.
    pooled_std = np.sqrt(np.var(vals_at_0) + background_std ** 2)
    max_capacity = 1.05 * upper_1p_bound(np.mean(vals_at_0), pooled_std,
                                         vals_at_0.shape[0])
    compensation_T0 = -np.log2(max_capacity / T0 - 1)[:, :, np.newaxis]
    compensation_TF = -np.log2(max_capacity / TF - 1)
    return compensation_TF - compensation_T0
def p_stabilize(array, percentile):
    """Winsorize *array* in place at the given percentile and return it.

    Values below the ``percentile``-th percentile (computed over the
    NaN-filtered data) are raised to it; values above the
    ``100 - percentile``-th percentile are lowered to it. NaN entries are
    left untouched (NaN comparisons are False). The input array is mutated.

    :param array: np.array to clamp (modified in place)
    :param percentile: tail size in percent, e.g. 5 clamps to [P5, P95]
    :return: the same (mutated) array
    """
    # FIX: original filtered NaNs twice; do one rm_nans pass and one
    # percentile call for both bounds.
    clean = rm_nans(array)
    p_low, p_high = np.percentile(clean, [percentile, 100 - percentile])
    array[array < p_low] = p_low
    array[array > p_high] = p_high
    return array