def calcHardMergeGap(self, SS, kA, kB):
    ''' Calculate scalar improvement in ELBO for hard merge of comps kA, kB.

    Does *not* include any entropy terms.

    Parameters
    ----------
    SS : SuffStatBag
        Sufficient statistics for the current model with K components.
        Must provide fields StartStateCount (K,) and TransStateCount (K, K).
    kA : int
        Index of the component that absorbs the merge.
    kB : int
        Index of the component removed by the merge.

    Returns
    -------
    L : scalar
        Lprop - Lcur, the change in the linear ELBO terms if kA and kB
        were merged into a single component.
    '''
    # Proposed model has one fewer component.
    m_K = SS.K - 1

    # Build a candidate suff-stat bag holding only the count fields this
    # computation needs, then merge kB into kA.
    m_SS = SuffStatBag(K=SS.K, D=0)
    m_SS.setField('StartStateCount', SS.StartStateCount.copy(), dims='K')
    m_SS.setField('TransStateCount', SS.TransStateCount.copy(),
                  dims=('K', 'K'))
    m_SS.mergeComps(kA, kB)

    # Create candidate beta vector: pool kB's mass into kA, drop entry kB.
    m_beta = StickBreakUtil.rho2beta(self.rho)
    m_beta[kA] += m_beta[kB]
    m_beta = np.delete(m_beta, kB, axis=0)

    # Create candidate rho and omega vectors for the K-1 model.
    m_rho = StickBreakUtil.beta2rho(m_beta, m_K)
    m_omega = np.delete(self.omega, kB)

    # Create candidate startTheta: prior mass plus merged start counts.
    m_startTheta = self.startAlpha * m_beta.copy()
    m_startTheta[:m_K] += m_SS.StartStateCount

    # Create candidate transTheta: row-wise prior mass, optional sticky
    # self-transition bonus (kappa), plus merged transition counts.
    m_transTheta = self.alpha * np.tile(m_beta, (m_K, 1))
    if self.kappa > 0:
        m_transTheta[:, :m_K] += self.kappa * np.eye(m_K)
    m_transTheta[:, :m_K] += m_SS.TransStateCount

    # Evaluate objective func. for both candidate and current model.
    Lcur = calcELBO_LinearTerms(
        SS=SS, rho=self.rho, omega=self.omega,
        startTheta=self.startTheta, transTheta=self.transTheta,
        alpha=self.alpha, startAlpha=self.startAlpha,
        gamma=self.gamma, kappa=self.kappa)
    Lprop = calcELBO_LinearTerms(
        SS=m_SS, rho=m_rho, omega=m_omega,
        startTheta=m_startTheta, transTheta=m_transTheta,
        alpha=self.alpha, startAlpha=self.startAlpha,
        gamma=self.gamma, kappa=self.kappa)

    # NOTE(review): this gap relies on the fact that all nonlinear ELBO
    # terms are entropies — presumably handled separately by the caller,
    # so only the linear terms need comparing here. Confirm against the
    # merge-move caller.
    return Lprop - Lcur