def confidence_interval(self):
    """The size of the 1-alpha confidence interval.

    Estimates the jackknife variance of the coherence for every channel
    pair, transforms the coherence to an approximately normal scale,
    forms a Student-t confidence interval there, and maps the bounds
    back to the unit interval.

    Returns
    -------
    ndarray
        Array of shape (n_channels, n_channels, self._L) holding the
        width (upper bound minus lower bound) of the 1-alpha confidence
        interval of the coherence at each frequency.
    """
    n_channels = self.input.data.shape[0]
    coh_var = np.zeros((n_channels, n_channels, self._L), 'd')

    # Fill only the strict lower triangle: j < i already guarantees
    # i != j, so the original redundant guard is dropped. The diagonal
    # (self-coherence) has no meaningful variance and stays zero.
    for i in range(n_channels):
        for j in range(i):
            coh_var[i, j] = tsu.jackknifed_coh_variance(
                self.spectra[i],
                self.spectra[j],
                self.eigs,
                adaptive=self._adaptive)

    # Mirror the lower triangle into the upper triangle. coh_var is a
    # real ('d') array, so conj() is a no-op kept for symmetry with the
    # complex-valued analogues of this pattern elsewhere.
    idx = triu_indices(n_channels, 1)
    coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()

    # Variance-stabilizing transform of the coherence, parameterized by
    # the degrees of freedom (2 * df - 2).
    coh_mat_xform = tsu.normalize_coherence(self.coherence,
                                            2 * self.df - 2)

    # Two-sided t interval on the transformed scale.
    t_lo = dist.t.ppf(self.alpha / 2, self.df - 1)
    t_hi = dist.t.ppf(1 - self.alpha / 2, self.df - 1)
    lb = coh_mat_xform + t_lo * np.sqrt(coh_var)
    ub = coh_mat_xform + t_hi * np.sqrt(coh_var)

    # Convert the bounds back to the unit interval (in place: the third
    # argument is the output array).
    tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)
    tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)

    return ub - lb
def confidence_interval(self):
    """The size of the 1-alpha confidence interval."""
    nseq = self.input.data.shape[0]
    coh_var = np.zeros((nseq, nseq, self._L), 'd')

    # Jackknife variance estimate for each distinct channel pair,
    # computed over the strict lower triangle.
    for row in range(nseq):
        for col in range(row):
            if row != col:
                coh_var[row, col] = tsu.jackknifed_coh_variance(
                    self.spectra[row],
                    self.spectra[col],
                    self.eigs,
                    adaptive=self._adaptive)

    # Symmetrize: copy the lower triangle into the upper one.
    upper = triu_indices(nseq, 1)
    coh_var[upper[0], upper[1], ...] = \
        coh_var[upper[1], upper[0], ...].conj()

    # Work on the variance-stabilized (approximately normal) scale.
    xform = tsu.normalize_coherence(self.coherence, 2 * self.df - 2)

    spread = np.sqrt(coh_var)
    lower = xform + dist.t.ppf(self.alpha / 2, self.df - 1) * spread
    upper_bound = (xform +
                   dist.t.ppf(1 - self.alpha / 2, self.df - 1) * spread)

    # Map both bounds back onto the unit interval, writing in place.
    tsu.normal_coherence_to_unit(lower, 2 * self.df - 2, lower)
    tsu.normal_coherence_to_unit(upper_bound, 2 * self.df - 2,
                                 upper_bound)

    return upper_bound - lower
""" coh_mat[i, j] = np.abs(sxy)**2 coh_mat[i, j] /= (sxx * syy) csd_mat[i, j] = sxy """ The variance from the different samples is calculated using a jack-knife approach: """ if i != j: coh_var[i, j] = utils.jackknifed_coh_variance( tspectra[i], tspectra[j], eigs, adaptive=True, ) """ This measure is normalized, based on the number of tapers: """ coh_mat_xform = utils.normalize_coherence(coh_mat, 2 * K - 2) """ We calculate 95% confidence intervals based on the jack-knife variance calculation: """
""" coh_mat[i, j] = np.abs(sxy) ** 2 coh_mat[i, j] /= (sxx * syy) csd_mat[i, j] = sxy """ The variance from the different samples is calculated using a jack-knife approach: """ if i != j: coh_var[i, j] = utils.jackknifed_coh_variance( tspectra[i], tspectra[j], eigs, adaptive=True, ) """ This measure is normalized, based on the number of tapers: """ coh_mat_xform = utils.normalize_coherence(coh_mat, 2 * K - 2) """ We calculate 95% confidence intervals based on the jack-knife variance
tspectra[i], tspectra[j], (w[i], w[j]), sides='onesided' ) sxx = alg.mtm_cross_spectrum( tspectra[i], tspectra[i], (w[i], w[i]), sides='onesided' ).real syy = alg.mtm_cross_spectrum( tspectra[j], tspectra[j], (w[i], w[j]), sides='onesided' ).real psd_mat[0,i,j] = sxx psd_mat[1,i,j] = syy coh_mat[i,j] = np.abs(sxy)**2 coh_mat[i,j] /= (sxx * syy) csd_mat[i,j] = sxy if i != j: coh_var[i,j] = utils.jackknifed_coh_variance( tspectra[i], tspectra[j], weights=(w[i], w[j]), last_freq=L ) upper_idc = utils.triu_indices(nseq, k=1) lower_idc = utils.tril_indices(nseq, k=-1) coh_mat[upper_idc] = coh_mat[lower_idc] coh_var[upper_idc] = coh_var[lower_idc] # convert this measure with the normalizing function coh_mat_xform = utils.normalize_coherence(coh_mat, 2*K-2) t025_limit = coh_mat_xform + dist.t.ppf(.025, K-1)*np.sqrt(coh_var) t975_limit = coh_mat_xform + dist.t.ppf(.975, K-1)*np.sqrt(coh_var) utils.normal_coherence_to_unit(t025_limit, 2*K-2, t025_limit) utils.normal_coherence_to_unit(t975_limit, 2*K-2, t975_limit)